Commit e7aa570

Alexander Kirillov authored and facebook-github-bot committed
Cityscapes semantic segmentation evaluator
Summary: semantic segmentation evaluation from Cityscapes scripts

Reviewed By: ppwwyyxx

Differential Revision: D19453083

fbshipit-source-id: bb51e743e29d5fcc54d845e0c55c6ae8baae60f7
1 parent 62cf3a2 commit e7aa570

File tree

6 files changed: +111, -18 lines


detectron2/data/datasets/builtin.py

Lines changed: 2 additions & 2 deletions
@@ -183,15 +183,15 @@ def register_all_cityscapes(root):
             ),
         )
         MetadataCatalog.get(inst_key).set(
-            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes", **meta
+            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
         )
 
         sem_key = key.format(task="sem_seg")
         DatasetCatalog.register(
             sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
         )
         MetadataCatalog.get(sem_key).set(
-            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="sem_seg", **meta
+            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_sem_seg", **meta
         )
 

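For context (not part of the diff), a minimal sketch of what the rename means downstream, assuming the built-in split names such as "cityscapes_fine_sem_seg_val" that this file registers:

from detectron2.data import MetadataCatalog  # importing detectron2.data registers the builtin datasets

# After this change the two Cityscapes tasks advertise distinct evaluator types,
# so downstream scripts can pick the matching evaluator per task.
print(MetadataCatalog.get("cityscapes_fine_instance_seg_val").evaluator_type)  # "cityscapes_instance"
print(MetadataCatalog.get("cityscapes_fine_sem_seg_val").evaluator_type)       # "cityscapes_sem_seg"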
detectron2/evaluation/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from .cityscapes_evaluation import CityscapesEvaluator
+from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
 from .coco_evaluation import COCOEvaluator
 from .rotated_coco_evaluation import RotatedCOCOEvaluator
 from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset

detectron2/evaluation/cityscapes_evaluation.py

Lines changed: 81 additions & 6 deletions
@@ -1,6 +1,7 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 import glob
 import logging
+import numpy as np
 import os
 import tempfile
 from collections import OrderedDict
@@ -16,12 +17,7 @@
 
 class CityscapesEvaluator(DatasetEvaluator):
     """
-    Evaluate instance segmentation results using cityscapes API.
-
-    Note:
-        * It does not work in multi-machine distributed training.
-        * It contains a synchronization, therefore has to be used on all ranks.
-        * Only the main process runs evaluation.
+    Base class for evaluation using cityscapes API.
     """
 
     def __init__(self, dataset_name):
@@ -47,6 +43,17 @@ def reset(self):
             "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
         )
 
+
+class CityscapesInstanceEvaluator(CityscapesEvaluator):
+    """
+    Evaluate instance segmentation results using cityscapes API.
+
+    Note:
+        * It does not work in multi-machine distributed training.
+        * It contains a synchronization, therefore has to be used on all ranks.
+        * Only the main process runs evaluation.
+    """
+
     def process(self, inputs, outputs):
         from cityscapesscripts.helpers.labels import name2label
 
@@ -110,3 +117,71 @@ def evaluate(self):
         ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
         self._working_dir.cleanup()
         return ret
+
+
+class CityscapesSemSegEvaluator(CityscapesEvaluator):
+    """
+    Evaluate semantic segmentation results using cityscapes API.
+
+    Note:
+        * It does not work in multi-machine distributed training.
+        * It contains a synchronization, therefore has to be used on all ranks.
+        * Only the main process runs evaluation.
+    """
+
+    def process(self, inputs, outputs):
+        from cityscapesscripts.helpers.labels import trainId2label
+
+        for input, output in zip(inputs, outputs):
+            file_name = input["file_name"]
+            basename = os.path.splitext(os.path.basename(file_name))[0]
+            pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
+
+            output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
+            pred = 255 * np.ones(output.shape, dtype=np.uint8)
+            for train_id, label in trainId2label.items():
+                if label.ignoreInEval:
+                    continue
+                pred[output == train_id] = label.id
+            Image.fromarray(pred).save(pred_filename)
+
+    def evaluate(self):
+        comm.synchronize()
+        if comm.get_rank() > 0:
+            return
+        # Load the Cityscapes eval script *after* setting the required env var,
+        # since the script reads CITYSCAPES_DATASET into global variables at load time.
+        import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
+
+        self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
+
+        # set some global states in cityscapes evaluation API, before evaluating
+        cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
+        cityscapes_eval.args.predictionWalk = None
+        cityscapes_eval.args.JSONOutput = False
+        cityscapes_eval.args.colorized = False
+
+        # These lines are adopted from
+        # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
+        gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
+        groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
+        assert len(
+            groundTruthImgList
+        ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
+            cityscapes_eval.args.groundTruthSearch
+        )
+        predictionImgList = []
+        for gt in groundTruthImgList:
+            predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
+        results = cityscapes_eval.evaluateImgLists(
+            predictionImgList, groundTruthImgList, cityscapes_eval.args
+        )
+        ret = OrderedDict()
+        ret["sem_seg"] = {
+            "IoU": 100.0 * results["averageScoreClasses"],
+            "iIoU": 100.0 * results["averageScoreInstClasses"],
+            "IoU_sup": 100.0 * results["averageScoreCategories"],
+            "iIoU_sup": 100.0 * results["averageScoreInstCategories"],
        }
+        self._working_dir.cleanup()
+        return ret

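For reference, a hedged usage sketch (not part of this commit): the new evaluator plugs into detectron2's standard evaluation loop like any other DatasetEvaluator. The names cfg and model are placeholders for a semantic-segmentation config and its built model; the Cityscapes data and the cityscapesscripts package must be available for evaluate() to succeed.

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import CityscapesSemSegEvaluator, inference_on_dataset

# cfg: a detectron2 CfgNode for a semantic segmentation model; model: the
# corresponding nn.Module (both assumed to come from the usual setup code).
dataset_name = "cityscapes_fine_sem_seg_val"  # built-in val split (assumed registered)
evaluator = CityscapesSemSegEvaluator(dataset_name)
val_loader = build_detection_test_loader(cfg, dataset_name)

# process() writes *_pred.png files with Cityscapes label ids to a temp dir;
# evaluate() then runs the cityscapesscripts pixel-level evaluation on them.
results = inference_on_dataset(model, val_loader, evaluator)
print(results["sem_seg"])  # {"IoU": ..., "iIoU": ..., "IoU_sup": ..., "iIoU_sup": ...}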
projects/PointRend/train_net.py

Lines changed: 9 additions & 3 deletions
@@ -16,7 +16,8 @@
 from detectron2.data import MetadataCatalog
 from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
 from detectron2.evaluation import (
-    CityscapesEvaluator,
+    CityscapesInstanceEvaluator,
+    CityscapesSemSegEvaluator,
     COCOEvaluator,
     DatasetEvaluators,
     LVISEvaluator,
@@ -50,11 +51,16 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None):
             return LVISEvaluator(dataset_name, cfg, True, output_folder)
         if evaluator_type == "coco":
             return COCOEvaluator(dataset_name, cfg, True, output_folder)
-        if evaluator_type == "cityscapes":
+        if evaluator_type == "cityscapes_instance":
             assert (
                 torch.cuda.device_count() >= comm.get_rank()
             ), "CityscapesEvaluator currently do not work with multiple machines."
-            return CityscapesEvaluator(dataset_name)
+            return CityscapesInstanceEvaluator(dataset_name)
+        if evaluator_type == "cityscapes_sem_seg":
+            assert (
+                torch.cuda.device_count() >= comm.get_rank()
+            ), "CityscapesEvaluator currently do not work with multiple machines."
+            return CityscapesSemSegEvaluator(dataset_name)
         if len(evaluator_list) == 0:
             raise NotImplementedError(
                 "no Evaluator for the dataset {} with the type {}".format(

tools/plain_train_net.py

Lines changed: 9 additions & 3 deletions
@@ -35,7 +35,8 @@
 )
 from detectron2.engine import default_argument_parser, default_setup, launch
 from detectron2.evaluation import (
-    CityscapesEvaluator,
+    CityscapesInstanceEvaluator,
+    CityscapesSemSegEvaluator,
     COCOEvaluator,
     COCOPanopticEvaluator,
     DatasetEvaluators,
@@ -82,11 +83,16 @@ def get_evaluator(cfg, dataset_name, output_folder=None):
         evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
     if evaluator_type == "coco_panoptic_seg":
         evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
-    if evaluator_type == "cityscapes":
+    if evaluator_type == "cityscapes_instance":
         assert (
             torch.cuda.device_count() >= comm.get_rank()
         ), "CityscapesEvaluator currently do not work with multiple machines."
-        return CityscapesEvaluator(dataset_name)
+        return CityscapesInstanceEvaluator(dataset_name)
+    if evaluator_type == "cityscapes_sem_seg":
+        assert (
+            torch.cuda.device_count() >= comm.get_rank()
+        ), "CityscapesEvaluator currently do not work with multiple machines."
+        return CityscapesSemSegEvaluator(dataset_name)
     if evaluator_type == "pascal_voc":
         return PascalVOCDetectionEvaluator(dataset_name)
     if evaluator_type == "lvis":

tools/train_net.py

Lines changed: 9 additions & 3 deletions
@@ -27,7 +27,8 @@
 from detectron2.data import MetadataCatalog
 from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
 from detectron2.evaluation import (
-    CityscapesEvaluator,
+    CityscapesInstanceEvaluator,
+    CityscapesSemSegEvaluator,
     COCOEvaluator,
     COCOPanopticEvaluator,
     DatasetEvaluators,
@@ -74,11 +75,16 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None):
             evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
         if evaluator_type == "coco_panoptic_seg":
             evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
-        elif evaluator_type == "cityscapes":
+        if evaluator_type == "cityscapes_instance":
             assert (
                 torch.cuda.device_count() >= comm.get_rank()
             ), "CityscapesEvaluator currently do not work with multiple machines."
-            return CityscapesEvaluator(dataset_name)
+            return CityscapesInstanceEvaluator(dataset_name)
+        if evaluator_type == "cityscapes_sem_seg":
+            assert (
+                torch.cuda.device_count() >= comm.get_rank()
+            ), "CityscapesEvaluator currently do not work with multiple machines."
+            return CityscapesSemSegEvaluator(dataset_name)
         elif evaluator_type == "pascal_voc":
             return PascalVOCDetectionEvaluator(dataset_name)
         elif evaluator_type == "lvis":

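And a quick dispatch check (again a sketch under assumptions, not from this commit): run from the tools/ directory so that train_net.py is importable; the default config is enough here because the cityscapes branches above only consult the dataset's evaluator_type metadata, which is set at registration time.

from detectron2.config import get_cfg
from detectron2.evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
from train_net import Trainer  # tools/train_net.py

cfg = get_cfg()
# With the metadata renamed in builtin.py, each Cityscapes task resolves to its own evaluator.
assert isinstance(Trainer.build_evaluator(cfg, "cityscapes_fine_instance_seg_val"), CityscapesInstanceEvaluator)
assert isinstance(Trainer.build_evaluator(cfg, "cityscapes_fine_sem_seg_val"), CityscapesSemSegEvaluator)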