# Crop size used by the training-time RandomCrop augmentation.
crop_size = (512, 512)
# Data preprocessor: channel conversion, normalization, and padding.
data_preprocessor = dict(
    bgr_to_rgb=True,  # OpenCV loads images as BGR; convert to RGB
    mean=[123.675, 116.28, 103.53],  # per-channel mean for normalization
    pad_val=0,  # padding value for image pixels
    seg_pad_val=255,  # padding value for seg maps (commonly the ignore label)
    size=(512, 1024),  # target size images/seg maps are padded to
    std=[58.395, 57.12, 57.375],  # per-channel std for normalization
    type='SegDataPreProcessor')
data_root = 'Watermelon87_Semantic_Seg_Mask/'  # dataset root directory
dataset_type = 'ZihaoDataset'  # registered dataset class name
# Default runtime hooks. NOTE: by_epoch=False, so checkpoint and logger
# intervals are counted in ITERATIONS, not epochs.
default_hooks = dict(
    checkpoint=dict(
        by_epoch=False,  # interval below is in iterations
        interval=20,  # save a checkpoint every 20 iterations
        max_keep_ckpts=2,  # keep only the two most recent checkpoints
        save_best='mIoU',  # also keep the checkpoint with the best mIoU
        type='CheckpointHook'),
    # Log every 20 iterations.
    logger=dict(interval=20, log_metric_by_epoch=False, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),  # steps the LR schedule
    sampler_seed=dict(type='DistSamplerSeedHook'),  # seeding for dist. sampler
    timer=dict(type='IterTimerHook'),  # per-iteration timing statistics
    visualization=dict(type='SegVisualizationHook'))  # seg result visualization
default_scope = 'mmseg'  # default registry scope for building components
# Environment settings: cuDNN autotuner, distributed backend, multiprocessing.
env_cfg = dict(
    cudnn_benchmark=True,  # speeds up fixed-size inputs via cuDNN autotuning
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
# Multi-scale ratios (used by the test-time augmentation pipeline below).
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
load_from = None  # no checkpoint is loaded before training
log_level = 'INFO'
log_processor = dict(by_epoch=False)  # report logs by iteration, not epoch
# EncoderDecoder segmentor built on FastSCNN, a lightweight backbone designed
# for real-time semantic segmentation.
model = dict(
    # Two auxiliary FCN heads on intermediate backbone features; their losses
    # (weight 0.4 each) provide extra supervision for earlier stages.
    auxiliary_head=[
        dict(
            align_corners=False,
            channels=32,
            concat_input=False,
            in_channels=128,
            in_index=-2,
            loss_decode=dict(
                loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=True),
            norm_cfg=dict(requires_grad=True, type='BN'),
            num_classes=6,
            num_convs=1,
            type='FCNHead'),
        dict(
            align_corners=False,
            channels=32,
            concat_input=False,
            in_channels=64,
            in_index=-3,
            loss_decode=dict(
                loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=True),
            norm_cfg=dict(requires_grad=True, type='BN'),
            num_classes=6,
            num_convs=1,
            type='FCNHead'),
    ],
    # FastSCNN backbone: channel widths and strides for its stages.
    backbone=dict(
        align_corners=False,
        downsample_dw_channels=(32, 48),
        fusion_out_channels=128,
        global_block_channels=(64, 96, 128),
        global_block_strides=(2, 2, 1),
        global_in_channels=64,
        global_out_channels=128,
        higher_in_channels=64,
        lower_in_channels=128,
        norm_cfg=dict(requires_grad=True, type='BN'),
        out_indices=(0, 1, 2),
        type='FastSCNN'),
    data_preprocessor=dict(
        bgr_to_rgb=True,
        mean=[123.675, 116.28, 103.53],
        pad_val=0,
        seg_pad_val=255,
        size=(512, 1024),
        std=[58.395, 57.12, 57.375],
        type='SegDataPreProcessor'),
    # Main decode head; depthwise-separable convolutions keep compute low.
    decode_head=dict(
        align_corners=False,
        channels=128,
        concat_input=False,
        in_channels=128,
        in_index=-1,
        loss_decode=dict(
            loss_weight=1, type='CrossEntropyLoss', use_sigmoid=True),
        norm_cfg=dict(requires_grad=True, type='BN'),
        num_classes=6,
        type='DepthwiseSeparableFCNHead'),
    test_cfg=dict(mode='whole'),  # whole-image inference (no sliding window)
    train_cfg=dict(),
    type='EncoderDecoder')
norm_cfg = dict(requires_grad=True, type='BN')  # BatchNorm, weights trainable
# SGD with momentum; no gradient clipping.
optim_wrapper = dict(
    clip_grad=None,
    optimizer=dict(lr=0.12, momentum=0.9, type='SGD', weight_decay=4e-05),
    type='OptimWrapper')
# Duplicate of optim_wrapper['optimizer'] (present as-is in the dumped config).
optimizer = dict(lr=0.12, momentum=0.9, type='SGD', weight_decay=4e-05)
# Polynomial LR decay (power 0.9) toward eta_min, counted in iterations.
# NOTE(review): end=160000 far exceeds train_cfg's max_iters=500, so the LR
# decays only slightly over this short run — confirm this is intentional.
param_scheduler = [
    dict(
        begin=0,
        by_epoch=False,
        end=160000,
        eta_min=0.0001,
        power=0.9,
        type='PolyLR'),
]
randomness = dict(seed=0)  # fixed seed for reproducibility
resume = False  # start fresh; do not resume a previous run
test_cfg = dict(type='TestLoop')
# Deterministic single-image evaluation: aspect-preserving resize, no shuffle.
test_dataloader = dict(
    batch_size=1,
    dataset=dict(
        data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'),
        data_root='Watermelon87_Semantic_Seg_Mask/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(2048, 1024), type='Resize'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ],
        type='ZihaoDataset'),
    num_workers=4,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
# Report IoU-, Dice-, and F-score-based metrics.
test_evaluator = dict(
    iou_metrics=['mIoU', 'mDice', 'mFscore'], type='IoUMetric')
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(keep_ratio=True, scale=(2048, 1024), type='Resize'),
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs'),
]
# Iteration-based training: 500 iterations total, validate every 20.
train_cfg = dict(max_iters=500, type='IterBasedTrainLoop', val_interval=20)
train_dataloader = dict(
    batch_size=3,
    dataset=dict(
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'),
        data_root='Watermelon87_Semantic_Seg_Mask/',
        # Augmentations: random resize (0.5x-2.0x), 512x512 random crop
        # (cat_max_ratio limits single-class dominance in a crop), random
        # horizontal flip, photometric (color) distortion.
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations'),
            dict(
                keep_ratio=True,
                ratio_range=(0.5, 2.0),
                scale=(2048, 1024),
                type='RandomResize'),
            dict(cat_max_ratio=0.75, crop_size=(512, 512), type='RandomCrop'),
            dict(prob=0.5, type='RandomFlip'),
            dict(type='PhotoMetricDistortion'),
            dict(type='PackSegInputs'),
        ],
        type='ZihaoDataset'),
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=True, type='InfiniteSampler'))
# Training augmentation pipeline (same as the one inlined in
# train_dataloader): random resize, random crop, flip, color jitter.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(
        keep_ratio=True,
        ratio_range=(0.5, 2.0),
        scale=(2048, 1024),
        type='RandomResize'),
    dict(cat_max_ratio=0.75, crop_size=(512, 512), type='RandomCrop'),
    dict(prob=0.5, type='RandomFlip'),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackSegInputs'),
]
tta_model = dict(type='SegTTAModel')
# Test-time augmentation: every (scale, flip) combination is run and the
# predictions are merged by SegTTAModel.
tta_pipeline = [
    dict(file_client_args=dict(backend='disk'), type='LoadImageFromFile'),
    dict(
        transforms=[
            # Six scales x two flip states = 12 augmented views per image.
            [dict(keep_ratio=True, scale_factor=s, type='Resize')
             for s in (0.5, 0.75, 1.0, 1.25, 1.5, 1.75)],
            [dict(direction='horizontal', prob=p, type='RandomFlip')
             for p in (0.0, 1.0)],
            [dict(type='LoadAnnotations')],
            [dict(type='PackSegInputs')],
        ],
        type='TestTimeAug'),
]
val_cfg = dict(type='ValLoop')
# Validation loader mirrors the test loader: single image, no shuffling.
val_dataloader = dict(
    batch_size=1,
    dataset=dict(
        data_prefix=dict(img_path='img_dir/val', seg_map_path='ann_dir/val'),
        data_root='Watermelon87_Semantic_Seg_Mask/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(2048, 1024), type='Resize'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ],
        type='ZihaoDataset'),
    num_workers=4,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(
    iou_metrics=['mIoU', 'mDice', 'mFscore'], type='IoUMetric')
vis_backends = [dict(type='LocalVisBackend')]  # write visualizations locally
visualizer = dict(
    name='visualizer',
    type='SegLocalVisualizer',
    vis_backends=[dict(type='LocalVisBackend')])
work_dir = './work_dirs/ZihaoDataset-FastSCNN'  # logs/checkpoints output dir