Updated math.py & nn.py with docstrings. #484

Merged: 15 commits, Jul 16, 2023
114 changes: 114 additions & 0 deletions keras_core/ops/math.py
@@ -35,6 +35,30 @@ def call(self, data, segment_ids):

@keras_core_export("keras_core.ops.segment_sum")
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
"""Computes the sum of segments in a tensor.

Args:
data: Input tensor.
segment_ids: A 1-D tensor containing segment indices for each
element in `data`.
num_segments: An integer representing the total number of
segments. If not specified, it is inferred from the maximum
value in `segment_ids`.
sorted: A boolean indicating whether `segment_ids` is sorted.
Default is `False`.

Returns:
A tensor containing the sum of segments, where each element
represents the sum of the corresponding segment in `data`.

Example:
```python
>>> data = keras_core.ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
>>> segment_ids = keras_core.ops.convert_to_tensor([0, 1, 0, 1, 0, 1])
>>> segment_sum(data, segment_ids)
array([9 12], shape=(2,), dtype=int32)
```
"""
if any_symbolic_tensors((data,)):
return SegmentSum(num_segments, sorted).symbolic_call(data, segment_ids)
return backend.math.segment_sum(
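Reviewer cross-check (not part of the diff): the docstring example output can be reproduced with plain NumPy. A minimal sketch, assuming unsorted segment ids and no explicit `num_segments`:

```python
import numpy as np

data = np.array([1, 2, 3, 4, 5, 6])
segment_ids = np.array([0, 1, 0, 1, 0, 1])

# Sum the data values that share each segment id.
num_segments = segment_ids.max() + 1
result = np.array([data[segment_ids == i].sum() for i in range(num_segments)])
print(result)  # [ 9 12]
```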
@@ -63,6 +87,29 @@ def call(self, x):

@keras_core_export("keras_core.ops.top_k")
def top_k(x, k, sorted=True):
"""Finds the top-k values and their indices in a tensor.

Args:
x: Input tensor.
k: An integer representing the number of top elements to retrieve.
sorted: A boolean indicating whether to sort the output in
descending order. Default is `True`.

Returns:
A tuple containing two tensors. The first tensor contains the
top-k values, and the second tensor contains the indices of the
top-k values in the input tensor.

Example:
```python
>>> x = keras_core.ops.convert_to_tensor([5, 2, 7, 1, 9, 3])
>>> values, indices = top_k(x, k=3)
>>> print(values)
array([9 7 5], shape=(3,), dtype=int32)
>>> print(indices)
array([4 2 0], shape=(3,), dtype=int32)
```
"""
if any_symbolic_tensors((x,)):
return TopK(k, sorted).symbolic_call(x)
return backend.math.top_k(x, k, sorted)
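Reviewer cross-check (not part of the diff): the values and indices in the example follow from a descending argsort, sketched here in plain NumPy:

```python
import numpy as np

x = np.array([5, 2, 7, 1, 9, 3])
k = 3

# Indices of the k largest values, largest first.
indices = np.argsort(x)[::-1][:k]
print(x[indices])  # [9 7 5]
print(indices)     # [4 2 0]
```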
@@ -82,6 +129,28 @@ def call(self, targets, predictions):

@keras_core_export("keras_core.ops.in_top_k")
def in_top_k(targets, predictions, k):
"""Checks if the targets are in the top-k predictions.

Args:
targets: A tensor of true labels.
predictions: A tensor of predicted scores for each class.
k: An integer representing the number of predictions to consider.

Returns:
A boolean tensor of the same shape as `targets`, where each element
indicates whether the corresponding target is in the top-k predictions.

Example:
```python
>>> targets = keras_core.ops.convert_to_tensor([2, 5, 3])
>>> predictions = keras_core.ops.convert_to_tensor(
[[0.1, 0.4, 0.6, 0.9, 0.5],
[0.1, 0.7, 0.9, 0.8, 0.3],
[0.1, 0.6, 0.9, 0.9, 0.5]])
>>> in_top_k(targets, predictions, k=3)
array([ True False True], shape=(3,), dtype=bool)
```
"""
if any_symbolic_tensors((targets, predictions)):
return InTopK(k).symbolic_call(targets, predictions)
return backend.math.in_top_k(targets, predictions, k)
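Reviewer cross-check (not part of the diff): a plain-NumPy sketch of the example. The out-of-range target `5` can never appear among the top-k indices, hence `False`; tie-breaking between equal scores may differ across backends:

```python
import numpy as np

targets = np.array([2, 5, 3])
predictions = np.array([[0.1, 0.4, 0.6, 0.9, 0.5],
                        [0.1, 0.7, 0.9, 0.8, 0.3],
                        [0.1, 0.6, 0.9, 0.9, 0.5]])
k = 3

# For each row, is the target index among the k highest scores?
top_k_idx = np.argsort(predictions, axis=-1)[:, ::-1][:, :k]
result = np.array([t in row for t, row in zip(targets, top_k_idx)])
print(result)  # [ True False  True]
```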
@@ -103,6 +172,27 @@ def call(self, x):

@keras_core_export("keras_core.ops.logsumexp")
def logsumexp(x, axis=None, keepdims=False):
"""Computes the logarithm of sum of exponentials of elements in a tensor.

Args:
x: Input tensor.
axis: An integer or a tuple of integers specifying the axis/axes
along which to compute the sum. If `None`, the sum is computed
over all elements. Default is `None`.
keepdims: A boolean indicating whether to retain the reduced axes
with length 1 in the output. Default is `False`.

Returns:
A tensor containing the logarithm of the sum of exponentials of
elements in `x`.

Example:
```python
>>> x = keras_core.ops.convert_to_tensor([1., 2., 3.])
>>> logsumexp(x)
array(3.407606, shape=(), dtype=float32)
```
"""
if any_symbolic_tensors((x,)):
return Logsumexp(axis, keepdims).symbolic_call(x)
return backend.math.logsumexp(x, axis=axis, keepdims=keepdims)
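Reviewer cross-check (not part of the diff): the example value matches the numerically stable formulation `max(x) + log(sum(exp(x - max(x))))`, sketched in plain NumPy:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])

# Shift by the max before exponentiating to avoid overflow.
m = x.max()
print(m + np.log(np.sum(np.exp(x - m))))  # 3.4076059644443806
```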
@@ -152,6 +242,30 @@ def call(self, x):

@keras_core_export("keras_core.ops.qr")
def qr(x, mode="reduced"):
"""Computes the QR decomposition of a tensor.

Args:
x: Input tensor.
mode: A string specifying the mode of the QR decomposition.
- 'reduced': Returns the reduced QR decomposition. (default)
- 'complete': Returns the complete QR decomposition.

Returns:
A tuple containing two tensors. The first tensor represents the
orthogonal matrix Q, and the second tensor represents the upper
triangular matrix R.

Example:
```python
>>> x = keras_core.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]])
>>> q, r = qr(x)
>>> print(q)
array([[-0.16903079 0.897085]
[-0.5070925 0.2760267 ]
[-0.8451542 -0.34503305]], shape=(3, 2), dtype=float32)
```
"""

if any_symbolic_tensors((x,)):
return Qr(mode=mode).symbolic_call(x)
return backend.math.qr(x, mode=mode)
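Reviewer cross-check (not part of the diff): the example `q` matches `numpy.linalg.qr` in `"reduced"` mode up to floating-point rounding, and `q @ r` reconstructs the input:

```python
import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
q, r = np.linalg.qr(x, mode="reduced")

print(q)  # matches the docstring values (signs may vary by backend)
np.testing.assert_allclose(q @ r, x, atol=1e-6)  # Q @ R reconstructs x
```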
150 changes: 149 additions & 1 deletion keras_core/ops/nn.py
@@ -899,7 +899,7 @@ def conv_transpose(
the output tensor. Can be a single integer to specify the same
value for all spatial dimensions. The amount of output padding
along a given dimension must be lower than the stride along that
- same dimension. If set to None (default), the output shape is
+ same dimension. If set to `None` (default), the output shape is
inferred.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
@@ -959,6 +959,38 @@ def compute_output_spec(self, x):

@keras_core_export(["keras_core.ops.one_hot", "keras_core.ops.nn.one_hot"])
def one_hot(x, num_classes, axis=-1, dtype=None):
"""Converts integer tensor `x` into a one-hot tensor.

The one-hot encoding is a representation where each integer value is
converted into a binary vector with a length equal to `num_classes`,
and the index corresponding to the integer value is marked as 1, while
all other indices are marked as 0.

Args:
x: Integer tensor to be encoded. The shape can be
arbitrary, but the dtype should be integer.
num_classes: Number of classes for the one-hot encoding.
axis: Axis along which the encoding is performed. Default is
-1, which represents the last axis.
dtype: (Optional) Data type of the output tensor. If not
provided, it defaults to the default data type of the backend.

Returns:
A one-hot encoded tensor with the same shape as `x` except for
the specified `axis` dimension, which will have a length of
`num_classes`. The dtype of the output tensor is determined by
`dtype` or the default data type of the backend.

Example:
```python
>>> x = keras_core.ops.convert_to_tensor([1, 3, 2, 0])
>>> one_hot(x, num_classes=4)
array([[0. 1. 0. 0.]
[0. 0. 0. 1.]
[0. 0. 1. 0.]
[1. 0. 0. 0.]], shape=(4, 4), dtype=float32)
```
"""
if any_symbolic_tensors((x,)):
return OneHot(num_classes, axis=axis, dtype=dtype).symbolic_call(x)
return backend.nn.one_hot(
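Reviewer cross-check (not part of the diff): the example output is the classic identity-matrix row lookup, sketched in plain NumPy:

```python
import numpy as np

x = np.array([1, 3, 2, 0])
num_classes = 4

# Row i of eye(num_classes) is the one-hot vector for class i.
print(np.eye(num_classes, dtype=np.float32)[x])
# [[0. 1. 0. 0.]
#  [0. 0. 0. 1.]
#  [0. 0. 1. 0.]
#  [1. 0. 0. 0.]]
```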
@@ -993,6 +1025,40 @@ def compute_output_spec(self, target, output):
]
)
def binary_crossentropy(target, output, from_logits=False):
"""Computes binary cross-entropy loss between target and output tensor.

The binary cross-entropy loss is commonly used in binary
classification tasks where each input sample belongs to one
of the two classes. It measures the dissimilarity between the
target and output probabilities or logits.

Args:
target: The target tensor representing the true binary labels.
Its shape should match the shape of the `output` tensor.
output: The output tensor representing the predicted probabilities
or logits. Its shape should match the shape of the
`target` tensor.
from_logits: (optional) Whether `output` is a tensor of logits or
probabilities. Set it to `True` if `output` represents logits;
set it to `False` (default) if `output` represents probabilities.

Returns:
A tensor containing the computed binary cross-entropy loss
between `target` and `output`.

Example:
```python
>>> target = keras_core.ops.convert_to_tensor([0, 1, 1, 0],
dtype="float32")
>>> output = keras_core.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2],
dtype="float32")
>>> binary_crossentropy(target, output)
array([0.10536054 0.10536054 0.22314355 0.22314355],
shape=(4,), dtype=float32)
```
"""
if any_symbolic_tensors((target, output)):
return BinaryCrossentropy(from_logits=from_logits).symbolic_call(
target, output
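Reviewer cross-check (not part of the diff): the example values follow from the plain elementwise formula `-(t*log(p) + (1-t)*log(1-p))`, without any epsilon clipping a backend may apply:

```python
import numpy as np

target = np.array([0.0, 1.0, 1.0, 0.0])
output = np.array([0.1, 0.9, 0.8, 0.2])

# Elementwise binary cross-entropy on probabilities.
bce = -(target * np.log(output) + (1 - target) * np.log(1 - output))
print(bce)  # [0.10536052 0.10536052 0.22314355 0.22314355]
```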
@@ -1036,6 +1102,48 @@ def compute_output_spec(self, target, output):
]
)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Computes categorical cross-entropy loss between target and output tensor.

The categorical cross-entropy loss is commonly used in multi-class
classification tasks where each input sample can belong to one of
multiple classes. It measures the dissimilarity
between the target and output probabilities or logits.

Args:
target: The target tensor representing the true categorical labels.
Its shape should match the shape of the `output` tensor
except for the last dimension.
output: The output tensor representing the predicted probabilities
or logits. Its shape should match the shape of the `target`
tensor except for the last dimension.
from_logits: (optional) Whether `output` is a tensor of logits or
probabilities. Set it to `True` if `output` represents logits;
set it to `False` (default) if `output` represents probabilities.
axis: (optional) The axis along which the categorical cross-entropy
is computed.
Default is -1, which corresponds to the last dimension of
the tensors.

Returns:
A tensor containing the computed categorical cross-entropy loss
between `target` and `output`.

Example:
```python
>>> target = keras_core.ops.convert_to_tensor([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
dtype="float32")
>>> output = keras_core.ops.convert_to_tensor([[0.9, 0.05, 0.05],
[0.1, 0.8, 0.1],
[0.2, 0.3, 0.5]],
dtype="float32")
>>> categorical_crossentropy(target, output)
array([0.10536054 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
```
"""
if any_symbolic_tensors((target, output)):
return CategoricalCrossentropy(
from_logits=from_logits, axis=axis
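Reviewer cross-check (not part of the diff): each example value is `-sum(t * log(p))` over the class axis, sketched in plain NumPy:

```python
import numpy as np

target = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0]])
output = np.array([[0.9, 0.05, 0.05],
                   [0.1, 0.8, 0.1],
                   [0.2, 0.3, 0.5]])

# Per-sample categorical cross-entropy on probabilities.
cce = -np.sum(target * np.log(output), axis=-1)
print(cce)  # [0.10536052 0.22314355 0.69314718]
```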
@@ -1082,6 +1190,46 @@ def compute_output_spec(self, target, output):
]
)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Computes sparse categorical cross-entropy loss.

The sparse categorical cross-entropy loss is similar to categorical
cross-entropy, but it is used when the target tensor contains integer
class labels instead of one-hot encoded vectors. It measures the
dissimilarity between the target and output probabilities or logits.

Args:
target: The target tensor representing the true class labels as integers.
Its shape should match the shape of the `output` tensor except
for the last dimension.
output: The output tensor representing the predicted probabilities
or logits.
Its shape should match the shape of the `target` tensor except
for the last dimension.
from_logits: (optional) Whether `output` is a tensor of logits
or probabilities. Set it to `True` if `output` represents logits;
set it to `False` (default) if `output` represents probabilities.
axis: (optional) The axis along which the sparse categorical
cross-entropy is computed.
Default is -1, which corresponds to the last dimension
of the tensors.

Returns:
A tensor containing the computed sparse categorical cross-entropy
loss between `target` and `output`.

Example:
```python
>>> target = keras_core.ops.convert_to_tensor([0, 1, 2], dtype="int32")
>>> output = keras_core.ops.convert_to_tensor([[0.9, 0.05, 0.05],
[0.1, 0.8, 0.1],
[0.2, 0.3, 0.5]],
dtype="float32")
>>> sparse_categorical_crossentropy(target, output)
array([0.10536056 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
```
"""
if any_symbolic_tensors((target, output)):
return SparseCategoricalCrossentropy(
from_logits=from_logits, axis=axis
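Reviewer cross-check (not part of the diff): with integer labels the loss reduces to `-log` of the probability assigned to the true class, sketched in plain NumPy:

```python
import numpy as np

target = np.array([0, 1, 2])
output = np.array([[0.9, 0.05, 0.05],
                   [0.1, 0.8, 0.1],
                   [0.2, 0.3, 0.5]])

# Pick each sample's true-class probability, then take -log.
scce = -np.log(output[np.arange(len(target)), target])
print(scce)  # [0.10536052 0.22314355 0.69314718]
```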