# Dataset configuration for semantic segmentation (MMSegmentation-style config).

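# mmseg's CustomDataset reads images from `img_dir`, masks from `ann_dir`,
# and takes an optional `split` text file listing the sample names to use.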
dataset_type = "CustomDataset"

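# Image directory plus the mask/split directories produced by the
# coco_to_mmsegmentation transform step (paths inferred from the layout above).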
img_dir = "/media/VA/databases/{{cookiecutter.dataset}}/images/"
ann_dir = "results/data/transform/coco_to_mmsegmentation-{{cookiecutter.dataset}}/masks"
split_dir = "results/data/transform/coco_to_mmsegmentation-{{cookiecutter.dataset}}"

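# Class names passed to the dataset; only "background" is listed here,
# presumably to be extended with the dataset's foreground classes.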
CLASSES = ["background"]

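# Target image size; keep_ratio=False stretches every image to exactly 512x512.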
img_scale = (512, 512)
keep_ratio = False

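# Standard ImageNet mean/std (RGB order); to_rgb converts mmcv's BGR-loaded
# images to RGB before normalizing.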
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)

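# Training: load image + mask, resize both, normalize, then pack tensors with
# DefaultFormatBundle and keep only img / gt_semantic_seg.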
train_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(type="LoadAnnotations"),
    dict(type="Resize", img_scale=img_scale, keep_ratio=keep_ratio),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="DefaultFormatBundle"),
    dict(type="Collect", keys=["img", "gt_semantic_seg"]),
]

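# Testing: MultiScaleFlipAug applies the inner transforms per scale/flip
# combination; with a single scale and flip=False it reduces to resize + normalize.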
test_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(
        type="MultiScaleFlipAug",
        img_scale=img_scale,
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=keep_ratio),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="ImageToTensor", keys=["img"]),
            dict(type="Collect", keys=["img"]),
        ],
    ),
]

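# Dataloaders: samples_per_gpu is the per-GPU batch size, workers_per_gpu the
# number of dataloader worker processes; val and test share the same val split here.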
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        img_dir=img_dir,
        ann_dir=ann_dir + "/train",
        split=split_dir + "/{{cookiecutter.dataset}}_train.txt",
        pipeline=train_pipeline,
        classes=CLASSES,
    ),
    val=dict(
        type=dataset_type,
        img_dir=img_dir,
        ann_dir=ann_dir + "/val",
        split=split_dir + "/{{cookiecutter.dataset}}_val.txt",
        pipeline=test_pipeline,
        classes=CLASSES,
    ),
    test=dict(
        type=dataset_type,
        img_dir=img_dir,
        ann_dir=ann_dir + "/val",
        split=split_dir + "/{{cookiecutter.dataset}}_val.txt",
        pipeline=test_pipeline,
        classes=CLASSES,
    ),
)