Commit 8b80892: init vita code repos
haiduo committed Jul 9, 2022 (1 parent 3fe7c33)
Showing 279 changed files with 22,248 additions and 0 deletions.
@@ -0,0 +1,61 @@
MODEL:
  BACKBONE:
    FREEZE_AT: 0
    NAME: "build_resnet_backbone"
  WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  RESNETS:
    DEPTH: 50
    STEM_TYPE: "basic"  # not used
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: False
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
    # NORM: "SyncBN"
    RES5_MULTI_GRID: [1, 1, 1]  # not used
DATASETS:
  TRAIN: ("ade20k_instance_train",)
  TEST: ("ade20k_instance_val",)
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.0001
  MAX_ITER: 160000
  WARMUP_FACTOR: 1.0
  WARMUP_ITERS: 0
  WEIGHT_DECAY: 0.05
  OPTIMIZER: "ADAMW"
  LR_SCHEDULER_NAME: "WarmupPolyLR"
  BACKBONE_MULTIPLIER: 0.1
  CLIP_GRADIENTS:
    ENABLED: True
    CLIP_TYPE: "full_model"
    CLIP_VALUE: 0.01
    NORM_TYPE: 2.0
  AMP:
    ENABLED: True
INPUT:
  MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 21)]"]
  MIN_SIZE_TRAIN_SAMPLING: "choice"
  MIN_SIZE_TEST: 640
  MAX_SIZE_TRAIN: 2560
  MAX_SIZE_TEST: 2560
  CROP:
    ENABLED: True
    TYPE: "absolute"
    SIZE: (640, 640)
    SINGLE_CATEGORY_MAX_AREA: 1.0
  COLOR_AUG_SSD: True
  SIZE_DIVISIBILITY: 640  # used in dataset mapper
  FORMAT: "RGB"
  DATASET_MAPPER_NAME: "mask_former_instance"
TEST:
  EVAL_PERIOD: 5000
  AUG:
    ENABLED: False
    MIN_SIZES: [320, 480, 640, 800, 960, 1120]
    MAX_SIZE: 4480
    FLIP: True
DATALOADER:
  FILTER_EMPTY_ANNOTATIONS: True
  NUM_WORKERS: 4
VERSION: 2
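Note on the MIN_SIZE_TRAIN line above: the `!!python/object/apply:eval` tag is detectron2's escape hatch for computed config values; `CfgNode.merge_from_file` evaluates the embedded expression when loaded with `allow_unsafe=True` (the default in detectron2). A minimal sketch of what it expands to:

```python
# What the eval-tagged expression above produces: the multi-scale training
# sizes, multiples of 64 from 320 up to 1280. One size is sampled per image
# because MIN_SIZE_TRAIN_SAMPLING is "choice".
scales = [int(x * 0.1 * 640) for x in range(5, 21)]
print(scales)  # [320, 384, 448, ..., 1216, 1280]  (16 values)
```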
@@ -0,0 +1,44 @@
_BASE_: Base-ADE20K-InstanceSegmentation.yaml
MODEL:
  META_ARCHITECTURE: "MaskFormer"
  SEM_SEG_HEAD:
    NAME: "MaskFormerHead"
    IGNORE_VALUE: 255
    NUM_CLASSES: 100
    LOSS_WEIGHT: 1.0
    CONVS_DIM: 256
    MASK_DIM: 256
    NORM: "GN"
    # pixel decoder
    PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
    IN_FEATURES: ["res2", "res3", "res4", "res5"]
    DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
    COMMON_STRIDE: 4
    TRANSFORMER_ENC_LAYERS: 6
  MASK_FORMER:
    TRANSFORMER_DECODER_NAME: "MultiScaleMaskedTransformerDecoder"
    TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
    DEEP_SUPERVISION: True
    NO_OBJECT_WEIGHT: 0.1
    CLASS_WEIGHT: 2.0
    MASK_WEIGHT: 5.0
    DICE_WEIGHT: 5.0
    HIDDEN_DIM: 256
    NUM_OBJECT_QUERIES: 100
    NHEADS: 8
    DROPOUT: 0.0
    DIM_FEEDFORWARD: 2048
    ENC_LAYERS: 0
    PRE_NORM: False
    ENFORCE_INPUT_PROJ: False
    SIZE_DIVISIBILITY: 32
    DEC_LAYERS: 10  # 9 decoder layers, add one for the loss on learnable query
    TRAIN_NUM_POINTS: 12544
    OVERSAMPLE_RATIO: 3.0
    IMPORTANCE_SAMPLE_RATIO: 0.75
    TEST:
      SEMANTIC_ON: True
      INSTANCE_ON: True
      PANOPTIC_ON: True
      OVERLAP_THRESHOLD: 0.8
      OBJECT_MASK_THRESHOLD: 0.8
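The CLASS_WEIGHT / MASK_WEIGHT / DICE_WEIGHT values above are the coefficients MaskFormer-style models use both in the Hungarian matcher and in the final loss. A hedged sketch of how such a weighted cost is assembled (the function name and inputs are illustrative, not this repo's API):

```python
def matching_cost(cost_class: float, cost_mask: float, cost_dice: float,
                  class_weight: float = 2.0, mask_weight: float = 5.0,
                  dice_weight: float = 5.0) -> float:
    """Weighted assignment cost for one (query, ground-truth) pair.

    cost_class: classification cost for the target class,
    cost_mask:  sigmoid cross-entropy between predicted and GT mask,
    cost_dice:  (1 - Dice) between predicted and GT mask.
    """
    return (class_weight * cost_class
            + mask_weight * cost_mask
            + dice_weight * cost_dice)
```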
@@ -0,0 +1,18 @@
_BASE_: ../maskformer2_R50_bs16_160k.yaml
MODEL:
  BACKBONE:
    NAME: "D2SwinTransformer"
  SWIN:
    EMBED_DIM: 192
    DEPTHS: [2, 2, 18, 2]
    NUM_HEADS: [6, 12, 24, 48]
    WINDOW_SIZE: 12
    APE: False
    DROP_PATH_RATE: 0.3
    PATCH_NORM: True
    PRETRAIN_IMG_SIZE: 384
  WEIGHTS: "swin_large_patch4_window12_384_22k.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  MASK_FORMER:
    NUM_OBJECT_QUERIES: 200
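The WEIGHTS entry above points at a local pickle, not a stock download: Swin checkpoints are released as .pth files, and Mask2Former-style repos convert them into detectron2's pickle format first. A minimal sketch of that conversion, assuming the official swin_large_patch4_window12_384_22k.pth stores its weights under a "model" key, as the upstream Swin releases do:

```python
import pickle
import sys

import torch

# Usage sketch: python convert_swin.py swin_large_patch4_window12_384_22k.pth \
#                                      swin_large_patch4_window12_384_22k.pkl
if __name__ == "__main__":
    state_dict = torch.load(sys.argv[1], map_location="cpu")["model"]
    res = {
        "model": state_dict,
        "__author__": "third_party",
        "matching_heuristics": True,  # lets detectron2 fuzzy-match param names
    }
    with open(sys.argv[2], "wb") as f:
        pickle.dump(res, f)
```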
@@ -0,0 +1,61 @@
MODEL:
  BACKBONE:
    FREEZE_AT: 0
    NAME: "build_resnet_backbone"
  WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  RESNETS:
    DEPTH: 50
    STEM_TYPE: "basic"  # not used
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: False
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
    # NORM: "SyncBN"
    RES5_MULTI_GRID: [1, 1, 1]  # not used
DATASETS:
  TRAIN: ("ade20k_panoptic_train",)
  TEST: ("ade20k_panoptic_val",)
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.0001
  MAX_ITER: 160000
  WARMUP_FACTOR: 1.0
  WARMUP_ITERS: 0
  WEIGHT_DECAY: 0.05
  OPTIMIZER: "ADAMW"
  LR_SCHEDULER_NAME: "WarmupPolyLR"
  BACKBONE_MULTIPLIER: 0.1
  CLIP_GRADIENTS:
    ENABLED: True
    CLIP_TYPE: "full_model"
    CLIP_VALUE: 0.01
    NORM_TYPE: 2.0
  AMP:
    ENABLED: True
INPUT:
  MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 21)]"]
  MIN_SIZE_TRAIN_SAMPLING: "choice"
  MIN_SIZE_TEST: 640
  MAX_SIZE_TRAIN: 2560
  MAX_SIZE_TEST: 2560
  CROP:
    ENABLED: True
    TYPE: "absolute"
    SIZE: (640, 640)
    SINGLE_CATEGORY_MAX_AREA: 1.0
  COLOR_AUG_SSD: True
  SIZE_DIVISIBILITY: 640  # used in dataset mapper
  FORMAT: "RGB"
  DATASET_MAPPER_NAME: "mask_former_panoptic"
TEST:
  EVAL_PERIOD: 5000
  AUG:
    ENABLED: False
    MIN_SIZES: [320, 480, 640, 800, 960, 1120]
    MAX_SIZE: 4480
    FLIP: True
DATALOADER:
  FILTER_EMPTY_ANNOTATIONS: True
  NUM_WORKERS: 4
VERSION: 2
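The dataset names in the panoptic base config above are not built into detectron2; they must be registered with DatasetCatalog before training, which Mask2Former-style repos do as an import side effect of their data package. A quick hedged check that registration has happened:

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

# Assumes the repo's dataset-registration module has already been imported.
for name in ("ade20k_panoptic_train", "ade20k_panoptic_val"):
    assert name in DatasetCatalog.list(), f"{name} is not registered"
    print(name, "ignore_label =", MetadataCatalog.get(name).get("ignore_label"))
```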
@@ -0,0 +1,44 @@
_BASE_: Base-ADE20K-PanopticSegmentation.yaml
MODEL:
  META_ARCHITECTURE: "MaskFormer"
  SEM_SEG_HEAD:
    NAME: "MaskFormerHead"
    IGNORE_VALUE: 255
    NUM_CLASSES: 150
    LOSS_WEIGHT: 1.0
    CONVS_DIM: 256
    MASK_DIM: 256
    NORM: "GN"
    # pixel decoder
    PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
    IN_FEATURES: ["res2", "res3", "res4", "res5"]
    DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
    COMMON_STRIDE: 4
    TRANSFORMER_ENC_LAYERS: 6
  MASK_FORMER:
    TRANSFORMER_DECODER_NAME: "MultiScaleMaskedTransformerDecoder"
    TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
    DEEP_SUPERVISION: True
    NO_OBJECT_WEIGHT: 0.1
    CLASS_WEIGHT: 2.0
    MASK_WEIGHT: 5.0
    DICE_WEIGHT: 5.0
    HIDDEN_DIM: 256
    NUM_OBJECT_QUERIES: 100
    NHEADS: 8
    DROPOUT: 0.0
    DIM_FEEDFORWARD: 2048
    ENC_LAYERS: 0
    PRE_NORM: False
    ENFORCE_INPUT_PROJ: False
    SIZE_DIVISIBILITY: 32
    DEC_LAYERS: 10  # 9 decoder layers, add one for the loss on learnable query
    TRAIN_NUM_POINTS: 12544
    OVERSAMPLE_RATIO: 3.0
    IMPORTANCE_SAMPLE_RATIO: 0.75
    TEST:
      SEMANTIC_ON: True
      INSTANCE_ON: True
      PANOPTIC_ON: True
      OVERLAP_THRESHOLD: 0.8
      OBJECT_MASK_THRESHOLD: 0.8
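OBJECT_MASK_THRESHOLD and OVERLAP_THRESHOLD above govern MaskFormer-style panoptic inference: a query is kept only if its classification score clears the first threshold, and its mask is kept only if enough of its area survives the per-pixel argmax merge across queries. A simplified sketch of those two tests (illustrative names, not this repo's code):

```python
def query_is_confident(score: float, object_mask_threshold: float = 0.8) -> bool:
    # Drop queries whose best non-void class score is too low.
    return score > object_mask_threshold

def mask_survives_merge(original_area: int, final_area: int,
                        overlap_threshold: float = 0.8) -> bool:
    # original_area: pixels where this query's mask was confident on its own;
    # final_area: pixels this query still wins after the argmax merge.
    return final_area / max(original_area, 1) > overlap_threshold
```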
@@ -0,0 +1,18 @@
_BASE_: ../maskformer2_R50_bs16_160k.yaml
MODEL:
  BACKBONE:
    NAME: "D2SwinTransformer"
  SWIN:
    EMBED_DIM: 192
    DEPTHS: [2, 2, 18, 2]
    NUM_HEADS: [6, 12, 24, 48]
    WINDOW_SIZE: 12
    APE: False
    DROP_PATH_RATE: 0.3
    PATCH_NORM: True
    PRETRAIN_IMG_SIZE: 384
  WEIGHTS: "swin_large_patch4_window12_384_22k.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  MASK_FORMER:
    NUM_OBJECT_QUERIES: 200
@@ -0,0 +1,61 @@
MODEL:
  BACKBONE:
    FREEZE_AT: 0
    NAME: "build_resnet_backbone"
  WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
  PIXEL_MEAN: [123.675, 116.280, 103.530]
  PIXEL_STD: [58.395, 57.120, 57.375]
  RESNETS:
    DEPTH: 50
    STEM_TYPE: "basic"  # not used
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: False
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
    # NORM: "SyncBN"
    RES5_MULTI_GRID: [1, 1, 1]  # not used
DATASETS:
  TRAIN: ("ade20k_sem_seg_train",)
  TEST: ("ade20k_sem_seg_val",)
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.0001
  MAX_ITER: 160000
  WARMUP_FACTOR: 1.0
  WARMUP_ITERS: 0
  WEIGHT_DECAY: 0.05
  OPTIMIZER: "ADAMW"
  LR_SCHEDULER_NAME: "WarmupPolyLR"
  BACKBONE_MULTIPLIER: 0.1
  CLIP_GRADIENTS:
    ENABLED: True
    CLIP_TYPE: "full_model"
    CLIP_VALUE: 0.01
    NORM_TYPE: 2.0
  AMP:
    ENABLED: True
INPUT:
  MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 512) for x in range(5, 21)]"]
  MIN_SIZE_TRAIN_SAMPLING: "choice"
  MIN_SIZE_TEST: 512
  MAX_SIZE_TRAIN: 2048
  MAX_SIZE_TEST: 2048
  CROP:
    ENABLED: True
    TYPE: "absolute"
    SIZE: (512, 512)
    SINGLE_CATEGORY_MAX_AREA: 1.0
  COLOR_AUG_SSD: True
  SIZE_DIVISIBILITY: 512  # used in dataset mapper
  FORMAT: "RGB"
  DATASET_MAPPER_NAME: "mask_former_semantic"
TEST:
  EVAL_PERIOD: 5000
  AUG:
    ENABLED: False
    MIN_SIZES: [256, 384, 512, 640, 768, 896]
    MAX_SIZE: 3584
    FLIP: True
DATALOADER:
  FILTER_EMPTY_ANNOTATIONS: True
  NUM_WORKERS: 4
VERSION: 2
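For scale, the solver settings in the semantic base config above imply a long schedule; assuming the usual 20,210-image ADE20K training split (an assumption stated here, the config itself only names the dataset), the arithmetic is:

```python
ims_per_batch = 16   # SOLVER.IMS_PER_BATCH
max_iter = 160_000   # SOLVER.MAX_ITER
eval_period = 5_000  # TEST.EVAL_PERIOD
n_train = 20_210     # ADE20K train images (assumed, not from the config)

images_seen = ims_per_batch * max_iter         # 2,560,000
print(f"~{images_seen / n_train:.0f} epochs")  # ~127 epochs
print(max_iter // eval_period, "evaluations")  # 32 evaluations over the run
```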
@@ -0,0 +1,11 @@
_BASE_: maskformer2_R50_bs16_160k.yaml
MODEL:
  WEIGHTS: "R-101.pkl"
  RESNETS:
    DEPTH: 101
    STEM_TYPE: "basic"  # not used
    STEM_OUT_CHANNELS: 64
    STRIDE_IN_1X1: False
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
    NORM: "SyncBN"
    RES5_MULTI_GRID: [1, 1, 1]  # not used
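The R101 variant above shows detectron2's _BASE_ inheritance: it restates only the backbone keys and picks everything else up from maskformer2_R50_bs16_160k.yaml, which in turn inherits the Base-* file. A hedged loading sketch (the filename is illustrative since the diff view omits it, and set_new_allowed stands in for the repo's own config-registration helper):

```python
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.set_new_allowed(True)  # shortcut; the repo registers its extra keys explicitly
cfg.merge_from_file("maskformer2_R101_bs16_160k.yaml")  # hypothetical filename
print(cfg.MODEL.RESNETS.DEPTH)  # 101, overridden in this file
print(cfg.SOLVER.MAX_ITER)      # 160000, inherited through the _BASE_ chain
```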
@@ -0,0 +1,44 @@
_BASE_: Base-ADE20K-SemanticSegmentation.yaml
MODEL:
  META_ARCHITECTURE: "MaskFormer"
  SEM_SEG_HEAD:
    NAME: "MaskFormerHead"
    IGNORE_VALUE: 255
    NUM_CLASSES: 150
    LOSS_WEIGHT: 1.0
    CONVS_DIM: 256
    MASK_DIM: 256
    NORM: "GN"
    # pixel decoder
    PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
    IN_FEATURES: ["res2", "res3", "res4", "res5"]
    DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
    COMMON_STRIDE: 4
    TRANSFORMER_ENC_LAYERS: 6
  MASK_FORMER:
    TRANSFORMER_DECODER_NAME: "MultiScaleMaskedTransformerDecoder"
    TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
    DEEP_SUPERVISION: True
    NO_OBJECT_WEIGHT: 0.1
    CLASS_WEIGHT: 2.0
    MASK_WEIGHT: 5.0
    DICE_WEIGHT: 5.0
    HIDDEN_DIM: 256
    NUM_OBJECT_QUERIES: 100
    NHEADS: 8
    DROPOUT: 0.0
    DIM_FEEDFORWARD: 2048
    ENC_LAYERS: 0
    PRE_NORM: False
    ENFORCE_INPUT_PROJ: False
    SIZE_DIVISIBILITY: 32
    DEC_LAYERS: 10  # 9 decoder layers, add one for the loss on learnable query
    TRAIN_NUM_POINTS: 12544
    OVERSAMPLE_RATIO: 3.0
    IMPORTANCE_SAMPLE_RATIO: 0.75
    TEST:
      SEMANTIC_ON: True
      INSTANCE_ON: False
      PANOPTIC_ON: False
      OVERLAP_THRESHOLD: 0.8
      OBJECT_MASK_THRESHOLD: 0.8
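Taken together, any of these configs plugs into a standard detectron2 training loop. A minimal hedged launch sketch (real runs go through the repo's own entry point, which also registers the MaskFormer architecture, the datasets, and the dataset mappers named above; a bare DefaultTrainer would not know them):

```python
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, launch


def main():
    cfg = get_cfg()
    cfg.set_new_allowed(True)  # shortcut for the repo's config registration
    cfg.merge_from_file("maskformer2_R50_bs16_160k.yaml")
    cfg.freeze()
    trainer = DefaultTrainer(cfg)  # the repo subclasses this in practice
    trainer.resume_or_load(resume=False)
    return trainer.train()


if __name__ == "__main__":
    launch(main, num_gpus_per_machine=8)  # IMS_PER_BATCH 16 -> 2 images per GPU
```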