diff --git a/src/mygrad/math/misc/funcs.py b/src/mygrad/math/misc/funcs.py
index 90538727..e141dab8 100644
--- a/src/mygrad/math/misc/funcs.py
+++ b/src/mygrad/math/misc/funcs.py
@@ -439,7 +439,12 @@ def minimum(
 
 @implements_numpy_override()
 def clip(
-    a: ArrayLike, a_min: ArrayLike, a_max: ArrayLike, *, constant: Optional[bool] = None
+    a: ArrayLike,
+    a_min: ArrayLike,
+    a_max: ArrayLike,
+    out: Optional[Union[np.ndarray, Tensor]] = None,
+    *,
+    constant: Optional[bool] = None,
 ) -> Tensor:
     """Clip (limit) the values in an array.
 
@@ -470,6 +475,11 @@ def clip(
         `None`. If `a_min` or `a_max` are ArrayLike, then the three
         arrays will be broadcasted to match their shapes.
 
+    out : Optional[Union[ndarray, Tensor]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None, a
+        freshly-allocated tensor is returned.
+
     constant : bool, optional(default=False)
         If ``True``, the returned tensor is a constant (it does not
         backpropagate a gradient)
@@ -495,11 +505,10 @@ def clip(
         raise ValueError("`a_min` and `a_max` cannot both be set to `None`")
 
     if a_min is not None:
-        a = maximum(a_min, a, constant=constant)
+        a = maximum(a_min, a, out=out, constant=constant)
 
     if a_max is not None:
-        a = minimum(a_max, a, constant=constant)
-
+        a = minimum(a_max, a, out=out, constant=constant)
     return a
diff --git a/src/mygrad/tensor_base.py b/src/mygrad/tensor_base.py
index 37eb5d30..8a3abbb3 100644
--- a/src/mygrad/tensor_base.py
+++ b/src/mygrad/tensor_base.py
@@ -3130,7 +3130,12 @@ def any(
         return np.any(self.data, axis=axis, out=out, keepdims=keepdims)
 
     def clip(
-        self, a_min: ArrayLike, a_max: ArrayLike, *, constant: Optional[bool] = None
+        self,
+        a_min: ArrayLike,
+        a_max: ArrayLike,
+        out: Optional[Union[np.ndarray, "Tensor"]] = None,
+        *,
+        constant: Optional[bool] = None,
     ) -> "Tensor":  # pragma: no cover
         """Clip (limit) the values in an array.
 
@@ -3158,6 +3163,11 @@ def clip(
         `None`. If `a_min` or `a_max` are ArrayLike, then the three
         arrays will be broadcasted to match their shapes.
 
+        out : Optional[Union[ndarray, Tensor]]
+            A location into which the result is stored. If provided, it must have
+            a shape that the inputs broadcast to. If not provided or None, a
+            freshly-allocated tensor is returned.
+
         constant : bool, optional(default=False)
             If ``True``, the returned tensor is a constant (it does not
             backpropagate a gradient)
diff --git a/tests/math/test_misc.py b/tests/math/test_misc.py
index 41739c20..c1ca075c 100644
--- a/tests/math/test_misc.py
+++ b/tests/math/test_misc.py
@@ -106,13 +106,6 @@ def amax_clip_only(clip_func, a, b, constant=False):
     )
 
 
-skip_if_lower_than_numpy_1p17 = pytest.mark.skipif(
-    np.__version__ < "1.17",
-    reason="numpy.clip behavior was made consistent in numpy-1.17; "
-    "test must by run on numpy 1.17 or later",
-)
-
-
 @pytest.mark.parametrize(
     ("mygrad_clip", "numpy_clip", "num_arrays"),
     [
@@ -129,7 +122,6 @@ def amax_clip_only(clip_func, a, b, constant=False):
         (clip, np.clip, 3),
     ],
 )
-@skip_if_lower_than_numpy_1p17
 def test_clip_fwd(mygrad_clip: Callable, numpy_clip: Callable, num_arrays: int):
     @fwdprop_test_factory(
         num_arrays=num_arrays, mygrad_func=mygrad_clip, true_func=numpy_clip
@@ -162,7 +154,6 @@ def is_not_close_clip(a: Tensor, a_min=None, a_max=None) -> bool:
         (clip, np.clip, 3),
     ],
 )
-@skip_if_lower_than_numpy_1p17
 def test_clip_bkwd(mygrad_clip: Callable, numpy_clip: Callable, num_arrays: int):
     @backprop_test_factory(
         num_arrays=num_arrays,
@@ -193,7 +184,6 @@ def wrapped_test():
         dtype=float,
     ),
 )
-@skip_if_lower_than_numpy_1p17
 @pytest.mark.filterwarnings("ignore: invalid value")
 def test_clip_input_validation(a, a_min, a_max):
     try:
@@ -218,3 +208,18 @@ def test_clip_method_bkwd():
     x = mg.tensor([1.0, 5.0, 10.0])
     x.clip(2, 7).backward()
     assert_allclose(x.grad, [0.0, 1.0, 0.0])
+
+
+@pytest.mark.parametrize("as_method", [False, True])
+def test_inplace_clip(as_method: bool):
+    x = mg.arange(4.0)
+    y = mg.tensor([-1.0, 2.0, 3.0])
+
+    clipper = y.clip if as_method else partial(mg.clip, y)
+    out = clipper(0, 2.1, out=x[1:])
+    out.backward()
+
+    assert_allclose(out, mg.tensor([0.0, 2.0, 2.1]))
+    assert_allclose(x, mg.tensor([0.0, 0.0, 2.0, 2.1]))
+    assert_allclose(x.grad, np.array([0.0, 1.0, 1.0, 1.0]))
+    assert_allclose(y.grad, np.array([0.0, 1.0, 0.0]))
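
Usage note (an illustrative sketch, not part of the patch): the snippet below mirrors the new `test_inplace_clip` test above and shows what the `out=` argument added by this patch enables, namely clipping into a view of another tensor while gradients still flow to both the source and the destination.

    # Minimal sketch of the new `out=` behavior; values taken from
    # `test_inplace_clip` above.
    import numpy as np
    import mygrad as mg

    x = mg.arange(4.0)
    y = mg.tensor([-1.0, 2.0, 3.0])

    # Clip `y` and write the result into a view of `x`; the graph tracks
    # the in-place write, so backprop reaches both `x` and `y`.
    out = mg.clip(y, 0, 2.1, out=x[1:])
    out.backward()

    np.testing.assert_allclose(out.data, [0.0, 2.0, 2.1])
    np.testing.assert_allclose(x.grad, [0.0, 1.0, 1.0, 1.0])
    np.testing.assert_allclose(y.grad, [0.0, 1.0, 0.0])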