Skip unrolling follow up (#3260)
* Update accuracy.py, test_accuracy.py with skip_unrolling

* change test_accuracy.py

* update average_precision, epoch_metric, test_epoch_metric, test_average_precision

* Update chohen_kappa.py, test_cohen_kappa.py

* update confusion_matrix, cosine_similarity, test_confusion_matrix, test_cosine_similarity

* Update docstring for js_divergence, kl_divergence and entropy

* update maximum_mean_discrepancy.py, mean_absolute_error.py and test_maximum_mean_discrepancy.py

* Update mean_pairwise_distance.py, metrics/test_maximum_mean_discrepancy.py and test_mean_pairwise_distance.py

* Update mean_squared_error.py, mutual_information.py, multilabel_consfusion_matrix.py, test_multilabel_confusion_matrix.py

* Update precision_recall_curve, test_precision_recall_curve

* update precision, psnr, recall, root_mean_square and add tests

* Remove unwanted tests, update roc_auc, update docstring for mpd, average_precision

* update running_average, ssim, top_k_categorical_accuracy

* update frequency.py

* update accumulation.py, fix mean_pairwise_distance
simeetnayan81 committed Jul 16, 2024
1 parent d715807 commit 6b6b169
Showing 28 changed files with 308 additions and 23 deletions.
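Before the per-file hunks, a minimal sketch of the behaviour the new ``skip_unrolling`` flag controls, based on the docstrings added in this diff. The toy metric, step function, and data below are illustrative assumptions, not part of the commit:

import torch
from ignite.engine import Engine
from ignite.metrics import Metric


class RecordUpdates(Metric):
    # Toy metric that only records what ``update`` receives.
    def __init__(self, output_transform=lambda x: x, skip_unrolling: bool = False):
        self.calls = []
        super().__init__(output_transform=output_transform, skip_unrolling=skip_unrolling)

    def reset(self):
        self.calls = []

    def update(self, output):
        self.calls.append(output)

    def compute(self):
        return len(self.calls)


def two_head_step(engine, batch):
    # Multi-output form referenced by the docstrings: ((y_pred_a, y_pred_b), (y_a, y_b))
    y_pred_a, y_pred_b = torch.rand(4, 2), torch.rand(4, 2)
    y_a, y_b = torch.randint(0, 2, (4,)), torch.randint(0, 2, (4,))
    return (y_pred_a, y_pred_b), (y_a, y_b)


evaluator = Engine(two_head_step)

# Default (skip_unrolling=False): the base Metric unrolls the paired sequences,
# calling update((y_pred_a, y_a)) and then update((y_pred_b, y_b)).
RecordUpdates().attach(evaluator, "unrolled_updates")

# skip_unrolling=True: update receives the whole ((y_pred_a, y_pred_b), (y_a, y_b))
# once, leaving per-head handling to the metric or its output_transform.
RecordUpdates(skip_unrolling=True).attach(evaluator, "single_update")

evaluator.run([0], max_epochs=1)
print(evaluator.state.metrics["unrolled_updates"])  # expected: 2 update calls
print(evaluator.state.metrics["single_update"])     # expected: 1 update call

Under this sketch both metrics see the same engine output; only how it is split before reaching ``update`` differs.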
40 changes: 35 additions & 5 deletions ignite/metrics/accumulation.py
@@ -34,7 +34,12 @@ class VariableAccumulation(Metric):
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

required_output_keys = None
@@ -45,13 +50,16 @@ def __init__(
op: Callable,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
if not callable(op):
raise TypeError(f"Argument op should be a callable, but given {type(op)}")

self._op = op

super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)
super(VariableAccumulation, self).__init__(
output_transform=output_transform, device=device, skip_unrolling=skip_unrolling
)

@reinit__is_reduced
def reset(self) -> None:
@@ -110,6 +118,9 @@ class Average(VariableAccumulation):
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Examples:
@@ -164,17 +175,25 @@ class Average(VariableAccumulation):
.. testoutput::
tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
self,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:
if isinstance(x, torch.Tensor) and x.ndim > 1:
x = x.sum(dim=0)
return a + x

super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)
super(Average, self).__init__(
op=_mean_op, output_transform=output_transform, device=device, skip_unrolling=skip_unrolling
)

@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[float, torch.Tensor]:
@@ -200,6 +219,9 @@ class GeometricAverage(VariableAccumulation):
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Note:
@@ -267,10 +289,16 @@ class GeometricAverage(VariableAccumulation):
.. testoutput::
tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
self,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
@@ -280,7 +308,9 @@ def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:
x = x.sum(dim=0)
return a + x

super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)
super(GeometricAverage, self).__init__(
op=_geom_op, output_transform=output_transform, device=device, skip_unrolling=skip_unrolling
)

@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[float, torch.Tensor]:
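The accumulation classes above illustrate the pattern repeated throughout this commit: each constructor gains a ``skip_unrolling`` keyword (defaulting to ``False``) and forwards it unchanged to the base ``Metric.__init__``. A hedged sketch of the same pattern on a hypothetical downstream accumulator — ``RunningSum`` is made up for illustration and is not part of this diff:

from typing import Callable, Union

import torch

from ignite.metrics import VariableAccumulation


class RunningSum(VariableAccumulation):
    # Hypothetical subclass showing the constructor pass-through used in this diff.
    def __init__(
        self,
        output_transform: Callable = lambda x: x,
        device: Union[str, torch.device] = torch.device("cpu"),
        skip_unrolling: bool = False,
    ):
        super(RunningSum, self).__init__(
            op=lambda a, x: a + x,  # accumulate a running sum
            output_transform=output_transform,
            device=device,
            skip_unrolling=skip_unrolling,  # forwarded exactly as Average/GeometricAverage do above
        )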
16 changes: 14 additions & 2 deletions ignite/metrics/accuracy.py
@@ -14,11 +14,14 @@ def __init__(
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
self._is_multilabel = is_multilabel
self._type: Optional[str] = None
self._num_classes: Optional[int] = None
super(_BaseClassification, self).__init__(output_transform=output_transform, device=device)
super(_BaseClassification, self).__init__(
output_transform=output_transform, device=device, skip_unrolling=skip_unrolling
)

def reset(self) -> None:
self._type = None
@@ -114,6 +117,9 @@ class Accuracy(_BaseClassification):
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Examples:
@@ -206,6 +212,9 @@ def thresholded_output_transform(output):
.. testoutput:: 4
0.6666...
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

_state_dict_all_req_keys = ("_num_correct", "_num_examples")
@@ -215,8 +224,11 @@ def __init__(
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
super(Accuracy, self).__init__(output_transform=output_transform, is_multilabel=is_multilabel, device=device)
super(Accuracy, self).__init__(
output_transform=output_transform, is_multilabel=is_multilabel, device=device, skip_unrolling=skip_unrolling
)

@reinit__is_reduced
def reset(self) -> None:
7 changes: 7 additions & 0 deletions ignite/metrics/average_precision.py
@@ -28,6 +28,9 @@ class AveragePrecision(EpochMetric):
#sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Note:
AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
@@ -60,13 +63,16 @@ def activated_output_transform(output):
0.9166...
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

def __init__(
self,
output_transform: Callable = lambda x: x,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
try:
from sklearn.metrics import average_precision_score # noqa: F401
@@ -78,4 +84,5 @@ def __init__(
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
skip_unrolling=skip_unrolling,
)
7 changes: 7 additions & 0 deletions ignite/metrics/cohen_kappa.py
@@ -23,6 +23,9 @@ class CohenKappa(EpochMetric):
is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
device: optional device specification for internal storage.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
@@ -46,6 +49,8 @@ class CohenKappa(EpochMetric):
0.4285...
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

def __init__(
@@ -54,6 +59,7 @@ def __init__(
weights: Optional[str] = None,
check_compute_fn: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
try:
from sklearn.metrics import cohen_kappa_score # noqa: F401
@@ -72,6 +78,7 @@ def __init__(
output_transform=output_transform,
check_compute_fn=check_compute_fn,
device=device,
skip_unrolling=skip_unrolling,
)

def get_cohen_kappa_fn(self) -> Callable[[torch.Tensor, torch.Tensor], float]:
11 changes: 10 additions & 1 deletion ignite/metrics/confusion_matrix.py
@@ -34,6 +34,9 @@ class ConfusionMatrix(Metric):
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Note:
The confusion matrix is formatted such that columns are predictions and rows are targets.
@@ -98,6 +101,9 @@ def binary_one_hot_output_transform(output):
tensor([[2, 1],
[1, 1]])
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

_state_dict_all_req_keys = ("confusion_matrix", "_num_examples")
@@ -108,6 +114,7 @@ def __init__(
average: Optional[str] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = True,
):
if average is not None and average not in ("samples", "recall", "precision"):
raise ValueError("Argument average can None or one of 'samples', 'recall', 'precision'")
@@ -118,7 +125,9 @@ def __init__(
self.num_classes = num_classes
self._num_examples = 0
self.average = average
super(ConfusionMatrix, self).__init__(output_transform=output_transform, device=device)
super(ConfusionMatrix, self).__init__(
output_transform=output_transform, device=device, skip_unrolling=skip_unrolling
)

@reinit__is_reduced
def reset(self) -> None:
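One detail worth flagging in the hunk above: ``ConfusionMatrix`` is the only metric shown here whose new default is ``skip_unrolling=True``; the others default to ``False``. A short usage sketch under that assumption, with illustrative data:

import torch
from ignite.metrics import ConfusionMatrix

cm = ConfusionMatrix(num_classes=3)  # skip_unrolling defaults to True for this metric
y_pred = torch.rand(4, 3)            # (batch, num_classes) scores
y = torch.randint(0, 3, (4,))        # (batch,) ground-truth class indices
cm.update((y_pred, y))
print(cm.compute())                  # 3x3 matrix (rows: targets, columns: predictions)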
9 changes: 8 additions & 1 deletion ignite/metrics/cosine_similarity.py
@@ -29,6 +29,9 @@ class CosineSimilarity(Metric):
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
@@ -65,15 +68,19 @@ class CosineSimilarity(Metric):
.. testoutput::
0.5080491304397583
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

def __init__(
self,
eps: float = 1e-8,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
):
super().__init__(output_transform, device)
super().__init__(output_transform, device, skip_unrolling=skip_unrolling)

self.eps = eps

6 changes: 6 additions & 0 deletions ignite/metrics/entropy.py
@@ -30,6 +30,9 @@ class Entropy(Metric):
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
@@ -58,6 +61,9 @@ class Entropy(Metric):
.. testoutput::
0.8902875582377116
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

_state_dict_all_req_keys = ("_sum_of_entropies", "_num_examples")
8 changes: 7 additions & 1 deletion ignite/metrics/epoch_metric.py
@@ -65,6 +65,9 @@ def mse_fn(y_preds, y_targets):
Warnings:
EpochMetricWarning: User is warned that there are issues with ``compute_fn`` on a batch of data processed.
To disable the warning, set ``check_compute_fn=False``.
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

_state_dict_all_req_keys = ("_predictions", "_targets")
@@ -75,14 +78,17 @@ def __init__(
output_transform: Callable = lambda x: x,
check_compute_fn: bool = True,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
) -> None:
if not callable(compute_fn):
raise TypeError("Argument compute_fn should be callable.")

self.compute_fn = compute_fn
self._check_compute_fn = check_compute_fn

super(EpochMetric, self).__init__(output_transform=output_transform, device=device)
super(EpochMetric, self).__init__(
output_transform=output_transform, device=device, skip_unrolling=skip_unrolling
)

@reinit__is_reduced
def reset(self) -> None:
22 changes: 20 additions & 2 deletions ignite/metrics/frequency.py
@@ -11,6 +11,18 @@
class Frequency(Metric):
"""Provides metrics for the number of examples processed per second.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
skip_unrolling: specifies whether output should be unrolled before being fed to the update method. Should be
true for multi-output models, for example, if ``y_pred`` contains multi-output values such as ``(y_pred_a, y_pred_b)``.
Alternatively, ``output_transform`` can be used to handle this.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
@@ -36,12 +48,18 @@ class Frequency(Metric):
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
# Progress bar will look like
# Epoch [2/10]: [50/100] 50%|█████ , wps=400 [00:17<00:35]
.. versionchanged:: 0.5.1
``skip_unrolling`` argument is added.
"""

def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
self,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
skip_unrolling: bool = False,
) -> None:
super(Frequency, self).__init__(output_transform=output_transform, device=device)
super(Frequency, self).__init__(output_transform=output_transform, device=device, skip_unrolling=skip_unrolling)

@reinit__is_reduced
def reset(self) -> None: