Commit d1cf697

Release version 1.1.0 (#38)
New release provides an average recall metric.
ybubnov authored Apr 4, 2019
1 parent 02b890d commit d1cf697
Showing 5 changed files with 64 additions and 87 deletions.
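
As shown in tests/test_average_recall.py below, the new metric is constructed like the existing per-label metrics and passed to model.compile(). A minimal usage sketch; the Dense toy model and the random data are illustrative assumptions, not part of the library:

    import keras
    import keras.utils
    import keras_metrics as km
    import numpy

    # Toy three-class classifier; assumed for illustration only.
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(3, input_dim=3))
    model.add(keras.layers.Softmax())
    model.compile(optimizer="sgd",
                  loss="categorical_crossentropy",
                  metrics=[km.categorical_average_recall(labels=3)])

    # Random data, just to exercise the metric during training.
    x = numpy.random.uniform(0, 1, (100, 3))
    y = keras.utils.to_categorical(numpy.random.randint(3, size=(100, 1)),
                                   num_classes=3)
    model.fit(x, y, epochs=1, batch_size=10)
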
21 changes: 4 additions & 17 deletions keras_metrics/__init__.py
@@ -3,7 +3,7 @@
 from keras_metrics import casts
 
 
-__version__ = "1.0.0"
+__version__ = "1.1.0"
 
 
 def metric_fn(cls, cast_strategy):
@@ -25,18 +25,6 @@ def fn(label=0, **kwargs):
 sparse_categorical_metric = partial(
     metric_fn, cast_strategy=casts.sparse_categorical)
 
-binary_average_metric = partial(
-    metric_fn, cast_strategy=casts.binary_argmax
-)
-
-categorical_average_metric = partial(
-    metric_fn, cast_strategy=casts.argmax
-)
-
-sparse_categorical_average_metric = partial(
-    metric_fn, cast_strategy=casts.sparse_argmax
-)
-
 
 binary_true_positive = binary_metric(m.true_positive)
 binary_true_negative = binary_metric(m.true_negative)
@@ -45,7 +33,7 @@ def fn(label=0, **kwargs):
 binary_precision = binary_metric(m.precision)
 binary_recall = binary_metric(m.recall)
 binary_f1_score = binary_metric(m.f1_score)
-binary_average_recall = binary_average_metric(m.average_recall)
+binary_average_recall = binary_metric(m.average_recall)
 
 
 categorical_true_positive = categorical_metric(m.true_positive)
@@ -55,7 +43,7 @@ def fn(label=0, **kwargs):
 categorical_precision = categorical_metric(m.precision)
 categorical_recall = categorical_metric(m.recall)
 categorical_f1_score = categorical_metric(m.f1_score)
-categorical_average_recall = categorical_average_metric(m.average_recall)
+categorical_average_recall = categorical_metric(m.average_recall)
 
 
 sparse_categorical_true_positive = sparse_categorical_metric(m.true_positive)
@@ -65,8 +53,7 @@ def fn(label=0, **kwargs):
 sparse_categorical_precision = sparse_categorical_metric(m.precision)
 sparse_categorical_recall = sparse_categorical_metric(m.recall)
 sparse_categorical_f1_score = sparse_categorical_metric(m.f1_score)
-sparse_categorical_average_recall = sparse_categorical_average_metric(
-    m.average_recall)
+sparse_categorical_average_recall = sparse_categorical_metric(m.average_recall)
 
 
 # For backward compatibility.
21 changes: 0 additions & 21 deletions keras_metrics/casts.py
@@ -24,24 +24,3 @@ def sparse_categorical(y_true, y_pred, dtype="int32", label=0):
     y_pred = K.cast(K.round(y_pred), dtype)
 
     return y_true, y_pred
-
-
-def binary_argmax(y_true, y_pred, dtype="int32", label=0):
-    y_true, y_pred = K.squeeze(y_true, axis=-1), K.squeeze(y_pred, axis=-1)
-    y_true, y_pred = K.cast(y_true, dtype=dtype), K.cast(y_pred, dtype=dtype)
-
-    return y_true, y_pred
-
-
-def argmax(y_true, y_pred, dtype="int32", label=0):
-    y_true, y_pred = K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)
-    y_true, y_pred = K.cast(y_true, dtype=dtype), K.cast(y_pred, dtype=dtype)
-
-    return y_true, y_pred
-
-
-def sparse_argmax(y_true, y_pred, dtype="int32", label=0):
-    y_true, y_pred = K.squeeze(y_true, axis=-1), K.argmax(y_pred, axis=-1)
-    y_true, y_pred = K.cast(y_true, dtype=dtype), K.cast(y_pred, dtype=dtype)
-
-    return y_true, y_pred
54 changes: 18 additions & 36 deletions keras_metrics/metrics.py
@@ -232,51 +232,33 @@ class average_recall(layer):
     """Create a metric for the average recall calculation.
     """
 
-    def __init__(self, name="average_recall", classes=2, **kwargs):
+    def __init__(self, name="average_recall", labels=1, **kwargs):
         super(average_recall, self).__init__(name=name, **kwargs)
 
-        if classes < 2:
-            raise ValueError('argument classes must >= 2')
+        self.labels = labels
 
-        self.classes = classes
-
-        self.true = K.zeros(classes, dtype="int32")
-        self.pred = K.zeros(classes, dtype="int32")
+        self.tp = K.zeros(labels, dtype="int32")
+        self.fn = K.zeros(labels, dtype="int32")
 
     def reset_states(self):
-        K.set_value(self.true, [0 for v in range(self.classes)])
-        K.set_value(self.pred, [0 for v in range(self.classes)])
+        K.set_value(self.tp, [0]*self.labels)
+        K.set_value(self.fn, [0]*self.labels)
 
     def __call__(self, y_true, y_pred):
-        # Cast input
-        t, p = self.cast(y_true, y_pred, dtype="float64")
-
-        # Init a bias matrix
-        b = K.variable([truediv(1, (v + 1)) for v in range(self.classes)],
-                       dtype="float64")
-
-        # Simulate to_categorical operation
-        t, p = K.expand_dims(t, axis=-1), K.expand_dims(p, axis=-1)
-        t, p = (t + 1) * b - 1, (p + 1) * b - 1
-
-        # Make correct position filled with 1
-        t, p = K.cast(t, "bool"), K.cast(p, "bool")
-        t, p = 1 - K.cast(t, "int32"), 1 - K.cast(p, "int32")
-
-        t, p = K.transpose(t), K.transpose(p)
+        y_true = K.cast(K.round(y_true), "int32")
+        y_pred = K.cast(K.round(y_pred), "int32")
+        neg_y_pred = 1 - y_pred
 
         # Results for current batch
-        batch_t = K.sum(t, axis=-1)
-        batch_p = K.sum(t * p, axis=-1)
+        tp = K.sum(K.transpose(y_true * y_pred), axis=-1)
+        fn = K.sum(K.transpose(y_true * neg_y_pred), axis=-1)
 
         # Accumulated results
-        total_t = self.true * 1 + batch_t
-        total_p = self.pred * 1 + batch_p
+        current_tp = K.cast(self.tp + tp, self.epsilon.dtype)
+        current_fn = K.cast(self.fn + fn, self.epsilon.dtype)
 
-        self.add_update(K.update_add(self.true, batch_t))
-        self.add_update(K.update_add(self.pred, batch_p))
+        tp_update = K.update_add(self.tp, tp)
+        fn_update = K.update_add(self.fn, fn)
 
-        tp = K.cast(total_p, dtype='float64')
-        tt = K.cast(total_t, dtype='float64')
+        self.add_update(tp_update, inputs=[y_true, y_pred])
+        self.add_update(fn_update, inputs=[y_true, y_pred])
 
-        return K.mean(truediv(tp, (tt + self.epsilon)))
+        return K.mean(truediv(current_tp, current_tp + current_fn + self.epsilon))
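
The rewritten metric accumulates per-label true positives (tp) and false negatives (fn) across batches, then returns the mean of the per-label recalls tp / (tp + fn). A NumPy sketch of the same arithmetic, assuming y_true is one-hot and y_pred rounds to 0/1, both of shape (batch, labels):

    import numpy as np

    def average_recall(y_true, y_pred, epsilon=1e-7):
        # Round to hard 0/1 assignments, as the metric does with K.round.
        y_true = np.round(y_true).astype(int)
        y_pred = np.round(y_pred).astype(int)

        # Per-label counts, summed over the batch axis.
        tp = (y_true * y_pred).sum(axis=0)
        fn = (y_true * (1 - y_pred)).sum(axis=0)

        # Mean of per-label recalls; epsilon guards labels with no positives.
        return np.mean(tp / (tp + fn + epsilon))

    # Two labels: recall 1.0 on label 0, recall 0.5 on label 1 -> 0.75.
    y_true = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
    y_pred = np.array([[1, 0], [1, 0], [0, 1], [1, 0]])
    print(average_recall(y_true, y_pred))  # ~0.75

Summing over the batch axis here corresponds to K.sum(K.transpose(...), axis=-1) in the backend code above.
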
42 changes: 42 additions & 0 deletions tests/test_average_recall.py
@@ -0,0 +1,42 @@
+import keras
+import keras.utils
+import keras_metrics as km
+import numpy
+import unittest
+
+
+class TestAverageRecall(unittest.TestCase):
+
+    def create_samples(self, n, labels=1):
+        x = numpy.random.uniform(0, numpy.pi/2, (n, labels))
+        y = numpy.random.randint(labels, size=(n, 1))
+        return x, keras.utils.to_categorical(y)
+
+    def test_average_recall(self):
+        model = keras.models.Sequential()
+        model.add(keras.layers.Activation(keras.backend.sin))
+        model.add(keras.layers.Activation(keras.backend.abs))
+        model.add(keras.layers.Softmax())
+        model.compile(optimizer="sgd",
+                      loss="categorical_crossentropy",
+                      metrics=[
+                          km.categorical_recall(label=0),
+                          km.categorical_recall(label=1),
+                          km.categorical_recall(label=2),
+                          km.categorical_average_recall(labels=3),
+                      ])
+
+        x, y = self.create_samples(10000, labels=3)
+
+        model.fit(x, y, epochs=10, batch_size=100)
+        metrics = model.evaluate(x, y, batch_size=100)[1:]
+
+        r0, r1, r2 = metrics[0:3]
+        average_recall = metrics[3]
+
+        expected_recall = (r0+r1+r2)/3.0
+        self.assertAlmostEqual(expected_recall, average_recall, places=3)
+
+
+if __name__ == "__main__":
+    unittest.main()
13 changes: 0 additions & 13 deletions tests/test_metrics.py
@@ -1,7 +1,6 @@
 import keras
 import keras.backend
 import keras.utils
-import keras.regularizers
 import keras_metrics as km
 import itertools
 import numpy
@@ -21,7 +20,6 @@ class TestMetrics(unittest.TestCase):
         km.binary_precision,
         km.binary_recall,
         km.binary_f1_score,
-        km.binary_average_recall
     ]
 
     categorical_metrics = [
@@ -32,7 +30,6 @@ class TestMetrics(unittest.TestCase):
         km.categorical_precision,
         km.categorical_recall,
         km.categorical_f1_score,
-        km.categorical_average_recall,
     ]
 
     sparse_categorical_metrics = [
@@ -43,7 +40,6 @@ class TestMetrics(unittest.TestCase):
         km.sparse_categorical_precision,
         km.sparse_categorical_recall,
         km.sparse_categorical_f1_score,
-        km.sparse_categorical_average_recall,
     ]
 
     def create_binary_samples(self, n):
@@ -63,9 +59,6 @@ def create_model(self, outputs, loss, metrics_fns):
         model.add(keras.layers.Activation(keras.backend.sin))
         model.add(keras.layers.Activation(keras.backend.abs))
         model.add(keras.layers.Lambda(lambda x: K.concatenate([x]*outputs)))
-        scale = [v + 1 for v in range(outputs)]
-        model.add(keras.layers.Lambda(lambda x: (0.5 - x) * scale + 1))
-        model.add(keras.layers.Softmax())
         model.compile(optimizer="sgd",
                       loss=loss,
                       metrics=self.create_metrics(metrics_fns))
@@ -132,14 +125,10 @@ def assert_metrics(self, model, samples_fn):
         precision = metrics[4]
         recall = metrics[5]
         f1 = metrics[6]
-        average_recall = metrics[7]
 
         expected_precision = tp_val / (tp_val + fp_val)
         expected_recall = tp_val / (tp_val + fn_val)
 
-        expected_average_recall = (
-            expected_recall + (tn_val / (fp_val + tn_val))) / 2
-
         f1_divident = (expected_precision*expected_recall)
         f1_divisor = (expected_precision+expected_recall)
         expected_f1 = (2 * f1_divident / f1_divisor)
@@ -155,8 +144,6 @@ def assert_metrics(self, model, samples_fn):
         self.assertAlmostEqual(expected_precision, precision, places=places)
         self.assertAlmostEqual(expected_recall, recall, places=places)
         self.assertAlmostEqual(expected_f1, f1, places=places)
-        self.assertAlmostEqual(expected_average_recall,
-                               average_recall, places=places)
 
     def test_binary_metrics(self):
         model = self.create_model(1, "binary_crossentropy",
