diff --git a/chainercv/evaluations/eval_semantic_segmentation.py b/chainercv/evaluations/eval_semantic_segmentation.py
index 8e18d5abc8..397e3e1f26 100644
--- a/chainercv/evaluations/eval_semantic_segmentation.py
+++ b/chainercv/evaluations/eval_semantic_segmentation.py
@@ -118,14 +118,12 @@ class :math:`j` by the prediction.
         \\frac \
         {\\sum_{i=1}^k N_{ii}} \
         {\\sum_{i=1}^k \\sum_{j=1}^k N_{ij}}`
-    * :math:`\\text{Accuracy} = \
+    * :math:`\\text{Class Accuracy} = \
         \\frac{N_{ii}}{\\sum_{j=1}^k N_{ij}}`
-    * :math:`\\text{Mean Accuracy} = \\frac{1}{k} \
+    * :math:`\\text{Mean Class Accuracy} = \\frac{1}{k} \
         \\sum_{i=1}^k \
         \\frac{N_{ii}}{\\sum_{j=1}^k N_{ij}}`

-    mIoU can be computed by taking :obj:`numpy.nanmean` of the IoUs returned
-    by this function.
     The more detailed description of the above metric can be found in a
     review on semantic segmentation [#]_.

@@ -162,9 +160,10 @@ class :math:`j` by the prediction.
             :math:`n\_class` classes. Its shape is :math:`(n\_class,)`.
         * **miou** (*float*): The average of IoUs over classes.
         * **pixel_accuracy** (*float*): The computed pixel accuracy.
-        * **accuracy** (*numpy.ndarray*): An array of accuracies for the \
-            :math:`n\_class` classes. Its shape is :math:`(n\_class,)`.
-        * **mean_accuracy** (*float*): The average of accuracies.
+        * **class_accuracy** (*numpy.ndarray*): An array of class accuracies \
+            for the :math:`n\_class` classes. \
+            Its shape is :math:`(n\_class,)`.
+        * **mean_class_accuracy** (*float*): The average of class accuracies.

     """
     # Evaluation code is based on
@@ -174,8 +173,9 @@ class :math:`j` by the prediction.
         pred_labels, gt_labels)
     iou = calc_semantic_segmentation_iou(confusion)
     pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
-    accuracy = np.diag(confusion) / np.sum(confusion, axis=1)
+    class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

     return {'iou': iou, 'miou': np.nanmean(iou),
             'pixel_accuracy': pixel_accuracy,
-            'accuracy': accuracy, 'mean_accuracy': np.nanmean(accuracy)}
+            'class_accuracy': class_accuracy,
+            'mean_class_accuracy': np.nanmean(class_accuracy)}
diff --git a/tests/evaluations_tests/test_eval_semantic_segmentation.py b/tests/evaluations_tests/test_eval_semantic_segmentation.py
index 0c614f2aa9..a0d626523f 100644
--- a/tests/evaluations_tests/test_eval_semantic_segmentation.py
+++ b/tests/evaluations_tests/test_eval_semantic_segmentation.py
@@ -16,13 +16,13 @@
         'gt_labels': iter(np.repeat([[[1, 0, 0], [0, -1, 1]]], 2, axis=0)),
         'iou': np.array([4 / 6, 4 / 6]),
         'pixel_accuracy': 4 / 5,
-        'accuracy': np.array([2 / 3, 2 / 2]),
+        'class_accuracy': np.array([2 / 3, 2 / 2]),
     },
     {'pred_labels': np.array([[[0, 0, 0], [0, 0, 0]]]),
      'gt_labels': np.array([[[1, 1, 1], [1, 1, 1]]]),
      'iou': np.array([0, 0]),
      'pixel_accuracy': 0 / 6,
-     'accuracy': np.array([np.nan, 0])
+     'class_accuracy': np.array([np.nan, 0])
     }
 )
 class TestEvalSemanticSegmentation(unittest.TestCase):
@@ -32,11 +32,11 @@ def test_eval_semantic_segmentation(self):
         result = eval_semantic_segmentation(
             self.pred_labels, self.gt_labels)
         np.testing.assert_equal(result['iou'], self.iou)
         np.testing.assert_equal(result['pixel_accuracy'], self.pixel_accuracy)
-        np.testing.assert_equal(result['accuracy'], self.accuracy)
+        np.testing.assert_equal(result['class_accuracy'], self.class_accuracy)
         np.testing.assert_equal(result['miou'], np.nanmean(self.iou))
         np.testing.assert_equal(
-            result['mean_accuracy'], np.nanmean(self.accuracy))
+            result['mean_class_accuracy'], np.nanmean(self.class_accuracy))


 class TestCalcSemanticSegmentationConfusion(unittest.TestCase):
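
A minimal usage sketch of the renamed result keys. The label arrays below are illustrative values, not taken from the test suite; as in the tests above, a negative ground-truth label marks an ignored pixel.

import numpy as np

from chainercv.evaluations import eval_semantic_segmentation

# One predicted label map and its ground truth, shape (1, H, W).
pred_labels = np.array([[[1, 1, 0], [0, 0, 1]]])
gt_labels = np.array([[[1, 0, 0], [0, -1, 1]]])

result = eval_semantic_segmentation(pred_labels, gt_labels)
# Renamed keys: per-class accuracy N_ii / sum_j N_ij and its nanmean.
print(result['class_accuracy'])       # [2/3, 1.0], shape (n_class,)
print(result['mean_class_accuracy'])  # (2/3 + 1.0) / 2 = 5/6
# The IoU-based keys and pixel_accuracy are unchanged.
print(result['iou'], result['miou'], result['pixel_accuracy'])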