Skip to content
This repository has been archived by the owner on Jul 2, 2021. It is now read-only.

Commit

Permalink
use class_accuracy instead of accuracy
Browse files Browse the repository at this point in the history
  • Loading branch information
yuyu2172 committed Jun 14, 2017
1 parent a46f761 commit 90ef526
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 13 deletions.
18 changes: 9 additions & 9 deletions chainercv/evaluations/eval_semantic_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,14 +118,12 @@ class :math:`j` by the prediction.
\\frac \
{\\sum_{i=1}^k N_{ii}} \
{\\sum_{i=1}^k \\sum_{j=1}^k N_{ij}}`
* :math:`\\text{Accuracy} = \
* :math:`\\text{Class Accuracy} = \
\\frac{N_{ii}}{\\sum_{j=1}^k N_{ij}}`
* :math:`\\text{Mean Accuracy} = \\frac{1}{k} \
* :math:`\\text{Mean Class Accuracy} = \\frac{1}{k} \
\\sum_{i=1}^k \
\\frac{N_{ii}}{\\sum_{j=1}^k N_{ij}}`
mIoU can be computed by taking :obj:`numpy.nanmean` of the IoUs returned
by this function.
The more detailed description of the above metric can be found in a
review on semantic segmentation [#]_.
Expand Down Expand Up @@ -162,9 +160,10 @@ class :math:`j` by the prediction.
:math:`n\_class` classes. Its shape is :math:`(n\_class,)`.
* **miou** (*float*): The average of IoUs over classes.
* **pixel_accuracy** (*float*): The computed pixel accuracy.
* **accuracy** (*numpy.ndarray*): An array of accuracies for the \
:math:`n\_class` classes. Its shape is :math:`(n\_class,)`.
* **mean_accuracy** (*float*): The average of accuracies.
* **class_accuracy** (*numpy.ndarray*): An array of class accuracies \
for the :math:`n\_class` classes. \
Its shape is :math:`(n\_class,)`.
    * **mean_class_accuracy** (*float*): The average of class accuracies.
"""
# Evaluation code is based on
Expand All @@ -174,8 +173,9 @@ class :math:`j` by the prediction.
pred_labels, gt_labels)
iou = calc_semantic_segmentation_iou(confusion)
pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
accuracy = np.diag(confusion) / np.sum(confusion, axis=1)
class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

return {'iou': iou, 'miou': np.nanmean(iou),
'pixel_accuracy': pixel_accuracy,
'accuracy': accuracy, 'mean_accuracy': np.nanmean(accuracy)}
'class_accuracy': class_accuracy,
'mean_class_accuracy': np.nanmean(class_accuracy)}
8 changes: 4 additions & 4 deletions tests/evaluations_tests/test_eval_semantic_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,13 @@
'gt_labels': iter(np.repeat([[[1, 0, 0], [0, -1, 1]]], 2, axis=0)),
'iou': np.array([4 / 6, 4 / 6]),
'pixel_accuracy': 4 / 5,
'accuracy': np.array([2 / 3, 2 / 2]),
'class_accuracy': np.array([2 / 3, 2 / 2]),
},
{'pred_labels': np.array([[[0, 0, 0], [0, 0, 0]]]),
'gt_labels': np.array([[[1, 1, 1], [1, 1, 1]]]),
'iou': np.array([0, 0]),
'pixel_accuracy': 0 / 6,
'accuracy': np.array([np.nan, 0])
'class_accuracy': np.array([np.nan, 0])
}
)
class TestEvalSemanticSegmentation(unittest.TestCase):
Expand All @@ -32,11 +32,11 @@ def test_eval_semantic_segmentation(self):
self.pred_labels, self.gt_labels)
np.testing.assert_equal(result['iou'], self.iou)
np.testing.assert_equal(result['pixel_accuracy'], self.pixel_accuracy)
np.testing.assert_equal(result['accuracy'], self.accuracy)
np.testing.assert_equal(result['class_accuracy'], self.class_accuracy)

np.testing.assert_equal(result['miou'], np.nanmean(self.iou))
np.testing.assert_equal(
result['mean_accuracy'], np.nanmean(self.accuracy))
result['mean_class_accuracy'], np.nanmean(self.class_accuracy))


class TestCalcSemanticSegmentationConfusion(unittest.TestCase):
Expand Down

0 comments on commit 90ef526

Please sign in to comment.