Commit e7c5807: upload segmentation code
Parent: f0bbcad

161 files changed: 16,068 additions, 1 deletion

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-exclude: ^detection/configs
+exclude: ^detection/configs, ^segmentation/configs
 repos:
   - repo: https://gitlab.com/pycqa/flake8.git
     rev: 3.8.3
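One caveat worth noting about this change: pre-commit treats the top-level exclude value as a single Python regular expression, not a comma-separated list, so the new pattern matches the literal text including the comma and space. If the intent is to skip both directories, an alternation such as ^(detection|segmentation)/configs or ^detection/configs|^segmentation/configs would be the conventional form.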
Lines changed: 54 additions & 0 deletions

@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
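These dataset settings follow the MMSegmentation config convention, so a file like the one above is normally consumed through mmcv's Config loader and the dataset registry. The sketch below shows that flow under those assumptions; the import paths come from MMSegmentation 0.x and the config path is hypothetical, since the diff view does not show the new file's name.

# Minimal sketch, assuming the MMSegmentation 0.x toolchain; the config path
# below is hypothetical because the diff view omits the new file names.
from mmcv import Config
from mmseg.datasets import build_dataloader, build_dataset

cfg = Config.fromfile('configs/_base_/datasets/ade20k.py')  # hypothetical path

# The registry resolves dataset_type ('ADE20KDataset') and applies
# train_pipeline to every sample the dataset yields.
train_set = build_dataset(cfg.data.train)

# samples_per_gpu / workers_per_gpu from the config drive the dataloader.
train_loader = build_dataloader(
    train_set,
    samples_per_gpu=cfg.data.samples_per_gpu,
    workers_per_gpu=cfg.data.workers_per_gpu,
    dist=False,
    shuffle=True)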
Lines changed: 59 additions & 0 deletions

@@ -0,0 +1,59 @@
# dataset settings
dataset_type = 'ChaseDB1Dataset'
data_root = 'data/CHASE_DB1'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (960, 999)
crop_size = (128, 128)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
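The CHASE_DB1 training split is tiny, so the train set is wrapped in a RepeatDataset with times=40000: one pass over the wrapper serves the underlying images 40000 times, which lets an iteration-based schedule run without exhausting the dataset or paying per-epoch reload costs. Conceptually the wrapper does nothing more than the following sketch (an illustration of the indexing semantics, not MMSegmentation's actual implementation).

# Conceptual sketch of RepeatDataset-style wrapping; MMSegmentation ships its
# own implementation, this only illustrates the indexing semantics.
class RepeatDatasetSketch:
    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times

    def __len__(self):
        # The wrapper advertises `times` copies of the underlying dataset.
        return self.times * len(self.dataset)

    def __getitem__(self, idx):
        # Indices simply wrap around the underlying dataset.
        return self.dataset[idx % len(self.dataset)]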
Lines changed: 54 additions & 0 deletions

@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/train',
        ann_dir='gtFine/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline))
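As in the other dataset files, samples_per_gpu is the per-GPU batch size and workers_per_gpu the number of dataloader workers per GPU, so the effective batch size is samples_per_gpu multiplied by the number of GPUs chosen at launch time (for example, 2 per GPU on 8 GPUs gives 16); the GPU count is not part of this file. The crop-size variants that follow all start from this file via _base_ = './cityscapes.py' and override only the crop size and pipelines, as sketched after the first variant below.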
Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
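Because this variant only redefines crop_size and the two pipelines, everything else (dataset type, data paths, samples_per_gpu, and so on) is inherited from './cityscapes.py' through the _base_ mechanism. A minimal sketch of how that resolves, assuming mmcv-style configs; the config file name used below is hypothetical, since the diff view does not show the new file names.

# Minimal sketch of _base_ resolution, assuming mmcv-style configs; the config
# file name is hypothetical (the diff view omits the new file names).
from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/cityscapes_1024x1024.py')

# Keys not redefined in the child file are inherited from './cityscapes.py' ...
print(cfg.data.samples_per_gpu)    # 2, inherited from the base file
# ... while redefined keys replace the inherited values.
print(cfg.data.train.pipeline[3])  # RandomCrop with crop_size=(1024, 1024)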
Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (768, 768)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2049, 1025),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (769, 769)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2049, 1025),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (832, 832)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (896, 896)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
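Each of these variant files exists so that a model config can pick a training crop size simply by pointing its _base_ list at the matching dataset file. A hypothetical example of that wiring follows; all of the file names are assumptions made for illustration, since the diff view does not show the new files' names or any model configs.

# Hypothetical model config shown only to illustrate the _base_ wiring; none
# of these paths are taken from this commit.
_base_ = [
    '../_base_/models/some_segmentor.py',          # placeholder model settings
    '../_base_/datasets/cityscapes_769x769.py',    # one of the crop-size variants above
    '../_base_/default_runtime.py',                # placeholder runtime settings
    '../_base_/schedules/schedule_80k.py',         # placeholder schedule
]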
