import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict

import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate

import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.evaluation.coco_evaluation import COCOEvaluator, _evaluate_predictions_on_coco
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table


# modified from COCOEvaluator for instance segmentation
class InstanceSegEvaluator(COCOEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """
    def _eval_predictions(self, predictions, img_ids=None):
        """
        Evaluate predictions. Fill self._results with the metrics of the tasks.
        """
        self._logger.info("Preparing results for COCO format ...")
        coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
        tasks = self._tasks or self._tasks_from_predictions(coco_results)

        # unmap the category ids for COCO
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
            # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
            # num_classes = len(all_contiguous_ids)
            # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1

            reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
            for result in coco_results:
                category_id = result["category_id"]
                # assert category_id < num_classes, (
                #     f"A prediction has class={category_id}, "
                #     f"but the dataset only has {num_classes} classes and "
                #     f"predicted class id should be in [0, {num_classes - 1}]."
                # )
                assert category_id in reverse_id_mapping, (
                    f"A prediction has class={category_id}, "
                    f"but the dataset only has class ids in {dataset_id_to_contiguous_id}."
                )
                result["category_id"] = reverse_id_mapping[category_id]
        if self._output_dir:
            file_path = os.path.join(self._output_dir, "coco_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(coco_results))
                f.flush()

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info(
            "Evaluating predictions with {} COCO API...".format(
                "unofficial" if self._use_fast_impl else "official"
            )
        )
        for task in sorted(tasks):
            assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
            coco_eval = (
                _evaluate_predictions_on_coco(
                    self._coco_api,
                    coco_results,
                    task,
                    kpt_oks_sigmas=self._kpt_oks_sigmas,
                    # use_fast_impl=self._use_fast_impl,
                    img_ids=img_ids,
                    max_dets_per_image=self._max_dets_per_image,
                )
                if len(coco_results) > 0
                else None  # cocoapi does not handle empty results very well
            )

            res = self.derive_coco_results(
                coco_eval, task, class_names=self._metadata.get("thing_classes")
            )
            self._results[task] = res

    def derive_coco_results(self, coco_eval, iou_type, class_names=None):
        """
        Derive the desired score numbers from summarized COCOeval.

        Args:
            coco_eval (None or COCOEval): None represents no predictions from model.
            iou_type (str):
            class_names (None or list[str]): if provided, will use it to compute
                per-category AP.

        Returns:
            a dict of {metric name: score}
        """
        metrics = {
            "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
            "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
            "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR@1", "AR@10", "AR@100"],
        }[iou_type]

        if coco_eval is None:
            self._logger.warn("No predictions from the model!")
            return {metric: float("nan") for metric in metrics}

        # the standard metrics
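        # The comprehension below reads coco_eval.stats positionally, relying on the
        # ordering COCOeval.summarize() uses for bbox/segm: AP, AP50, AP75, APs, APm,
        # APl, AR@1, AR@10, AR@100, ARs, ARm, ARl. That is how the "segm" list above
        # picks up the three AR entries from stats[6:9].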
        results = {
            metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
            for idx, metric in enumerate(metrics)
        }
        self._logger.info(
            "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
        )
        if not np.isfinite(sum(results.values())):
            self._logger.info("Some metrics cannot be computed and are shown as NaN.")

        if class_names is None or len(class_names) <= 1:
            return results
        # Compute per-category AP
        # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
        precisions = coco_eval.eval["precision"]
        # precision has dims (iou, recall, cls, area range, max dets)
        assert len(class_names) == precisions.shape[2]

        results_per_category = []
        for idx, name in enumerate(class_names):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            ap = np.mean(precision) if precision.size else float("nan")
            results_per_category.append(("{}".format(name), float(ap * 100)))

        # tabulate it
        N_COLS = min(6, len(results_per_category) * 2)
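        # Flatten the (name, AP) pairs and regroup them into rows of N_COLS cells, i.e.
        # up to three category/AP pairs per row; zip_longest pads the last row with None
        # when the number of categories does not divide evenly.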
        results_flatten = list(itertools.chain(*results_per_category))
        results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
        table = tabulate(
            results_2d,
            tablefmt="pipe",
            floatfmt=".3f",
            headers=["category", "AP"] * (N_COLS // 2),
            numalign="left",
        )
        self._logger.info("Per-category {} AP: \n".format(iou_type) + table)

        results.update({"AP-" + name: ap for name, ap in results_per_category})
        return results
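

# Minimal usage sketch, assuming a dataset registered under the hypothetical name
# "my_instance_val" and an already-built detectron2 `cfg` and `model`; adjust the
# names to your setup.
#
#   from detectron2.data import build_detection_test_loader
#   from detectron2.evaluation import inference_on_dataset
#
#   evaluator = InstanceSegEvaluator("my_instance_val", output_dir="./eval_out")
#   val_loader = build_detection_test_loader(cfg, "my_instance_val")
#   results = inference_on_dataset(model, val_loader, evaluator)
#   print(results["segm"]["AP"])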