[22.11] python3Packages.tensorflow: add patches for many CVEs #224988

Merged · 2 commits · Apr 16, 2023
New patch file (+250 lines):
Based on upstream commit 1295ae4dbb52fe06b19733b0257e2340d7b63b8d, with a header reference changed to match its location as of 2.10.1.
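
A minimal reproduction sketch for the issue this patch addresses, adapted from the regression test added below. On an unpatched 2.10.x build, the zero-sized window and strides reach the tf2xla pooling kernels unchecked and can trigger a floating-point exception; with the patch applied, the op raises `InvalidArgumentError` instead. Note that the hardened code path is the XLA kernel, so the crash is reproduced under an XLA scope (the test uses `xla_test.XLATestCase`); in plain eager execution the standard CPU kernel performs its own validation.

```python
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

grad = np.full([1, 1, 1, 1], 0.39117979, dtype=np.float32)
try:
    # Zero-sized ksize/strides: rejected by the validation this patch adds.
    gen_nn_ops.avg_pool_grad(
        orig_input_shape=[1, 0, 0, 0],
        grad=grad,
        ksize=[1, 0, 0, 0],
        strides=[1, 0, 0, 0],
        padding="SAME",
        data_format="NCHW",
    )
except tf.errors.InvalidArgumentError as e:
    print(e)  # "Sliding window ksize field for dimension 1 must be positive but is 0"
```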

diff --git a/tensorflow/compiler/tests/pooling_ops_test.py b/tensorflow/compiler/tests/pooling_ops_test.py
index 3d2695b15e9..3a7e22c02e5 100644
--- a/tensorflow/compiler/tests/pooling_ops_test.py
+++ b/tensorflow/compiler/tests/pooling_ops_test.py
@@ -18,7 +18,9 @@ import numpy as np

from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
@@ -560,6 +562,34 @@ class PoolGradTest(xla_test.XLATestCase):

self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)

+ @test_util.disable_mlir_bridge(
+ "TODO(b/266613412): investigate FPE in AvgPoolGrad for TPU"
+ )
+ def testAvgPoolGradSamePaddingZeroStrideZeroSize(self):
+ output_gradient_vals = np.array([0.39117979], dtype=np.float32)
+ output_gradient_vals = output_gradient_vals.reshape([1, 1, 1, 1])
+ with self.session() as sess:
+ with self.test_scope():
+ output_gradients = array_ops.placeholder(
+ dtypes.float32, shape=output_gradient_vals.shape
+ )
+ t = gen_nn_ops.avg_pool_grad(
+ orig_input_shape=[1, 0, 0, 0],
+ grad=output_gradients,
+ ksize=[1, 0, 0, 0],
+ strides=[1, 0, 0, 0],
+ padding="SAME",
+ data_format="NCHW",
+ )
+ with self.assertRaisesRegex(
+ errors.InvalidArgumentError,
+ (
+ "Sliding window ksize field for dimension 1 must be positive but"
+ " is 0"
+ ),
+ ):
+ sess.run(t, {output_gradients: output_gradient_vals})
+
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.

diff --git a/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc b/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
index 43422de2650..8243d925955 100644
--- a/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
@@ -33,15 +33,41 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/tensor_format.h"
+#include "tensorflow/core/platform/errors.h"

namespace tensorflow {
namespace {

+template <typename T>
+static Status ValidateKernelSizes(const T& ksizes) {
+ for (size_t i = 0; i < ksizes.size(); ++i) {
+ if (ksizes[i] <= 0) {
+ return errors::InvalidArgument(
+ "Sliding window ksize field for dimension ", i,
+ " must be positive but is ", ksizes[i]);
+ }
+ }
+ return OkStatus();
+}
+
+template <typename T>
+static Status ValidateStrides(const T& strides) {
+ for (size_t i = 0; i < strides.size(); ++i) {
+ if (strides[i] <= 0) {
+ return errors::InvalidArgument(
+ "Sliding window stride field for dimension ", i,
+ " must be positive but is ", strides[i]);
+ }
+ }
+ return OkStatus();
+}
+
// Superclass of pooling ops.
class PoolingOp : public XlaOpKernel {
public:
@@ -83,50 +109,54 @@ class PoolingOp : public XlaOpKernel {

protected:
StatusOr<std::vector<int64_t>> GetKernelSize(XlaOpKernelContext* ctx) {
- if (ctx->num_inputs() == 1) {
- return ksize_;
- }
- const TensorShape ksize_shape = ctx->InputShape(1);
- // Validate input sizes.
- if (!TensorShapeUtils::IsVector(ksize_shape)) {
- return errors::InvalidArgument("ksize must be a vector, not shape ",
- ksize_shape.DebugString());
- }
- if (ksize_shape.num_elements() != num_dims()) {
- return errors::InvalidArgument(
- "Sliding window ksize field must "
- "specify ",
- num_dims(), " dimensions");
- }
std::vector<int64_t> ksize;
- auto status = ctx->ConstantInputAsIntVector(1, &ksize);
- if (!status.ok()) {
- return status;
+ if (ctx->num_inputs() == 1) {
+ ksize = ksize_;
+ } else {
+ const TensorShape ksize_shape = ctx->InputShape(1);
+ // Validate input sizes.
+ if (!TensorShapeUtils::IsVector(ksize_shape)) {
+ return errors::InvalidArgument("ksize must be a vector, not shape ",
+ ksize_shape.DebugString());
+ }
+ if (ksize_shape.num_elements() != num_dims()) {
+ return errors::InvalidArgument(
+ "Sliding window ksize field must "
+ "specify ",
+ num_dims(), " dimensions");
+ }
+ auto status = ctx->ConstantInputAsIntVector(1, &ksize);
+ if (!status.ok()) {
+ return status;
+ }
}
+ TF_RETURN_IF_ERROR(ValidateKernelSizes(ksize));
return ksize;
}

StatusOr<std::vector<int64_t>> GetStride(XlaOpKernelContext* ctx) {
- if (ctx->num_inputs() == 1) {
- return stride_;
- }
- const TensorShape stride_shape = ctx->InputShape(2);
- // Validate input sizes.
- if (!TensorShapeUtils::IsVector(stride_shape)) {
- return errors::InvalidArgument("stride must be a vector, not shape ",
- stride_shape.DebugString());
- }
- if (stride_shape.num_elements() != num_dims()) {
- return errors::InvalidArgument(
- "Sliding window stride field must "
- "specify ",
- num_dims(), " dimensions");
- }
std::vector<int64_t> stride;
- auto status = ctx->ConstantInputAsIntVector(2, &stride);
- if (!status.ok()) {
- return status;
+ if (ctx->num_inputs() == 1) {
+ stride = stride_;
+ } else {
+ const TensorShape stride_shape = ctx->InputShape(2);
+ // Validate input sizes.
+ if (!TensorShapeUtils::IsVector(stride_shape)) {
+ return errors::InvalidArgument("stride must be a vector, not shape ",
+ stride_shape.DebugString());
+ }
+ if (stride_shape.num_elements() != num_dims()) {
+ return errors::InvalidArgument(
+ "Sliding window stride field must "
+ "specify ",
+ num_dims(), " dimensions");
+ }
+ auto status = ctx->ConstantInputAsIntVector(2, &stride);
+ if (!status.ok()) {
+ return status;
+ }
}
+ TF_RETURN_IF_ERROR(ValidateStrides(stride));
return stride;
}

@@ -355,10 +385,12 @@ class MaxPoolGradOp : public XlaOpKernel {
errors::InvalidArgument("Sliding window ksize field must "
"specify ",
num_dims(), " dimensions"));
+ OP_REQUIRES_OK(ctx, ValidateKernelSizes(ksize_));
OP_REQUIRES(ctx, stride_.size() == num_dims(),
errors::InvalidArgument("Sliding window strides field must "
"specify ",
num_dims(), " dimensions"));
+ OP_REQUIRES_OK(ctx, ValidateStrides(stride_));

const TensorShape tensor_in_shape = ctx->InputShape(0);
const TensorShape tensor_out_shape = ctx->InputShape(1);
@@ -446,11 +478,13 @@ class AvgPoolGradOp : public XlaOpKernel {
errors::InvalidArgument("Sliding window ksize field must "
"specify ",
num_dims(), " dimensions"));
+ OP_REQUIRES_OK(ctx, ValidateKernelSizes(ksize_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("strides", &stride_));
OP_REQUIRES(ctx, stride_.size() == num_dims(),
errors::InvalidArgument("Sliding window strides field must "
"specify ",
num_dims(), " dimensions"));
+ OP_REQUIRES_OK(ctx, ValidateStrides(stride_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("padding", &padding_));
OP_REQUIRES(ctx, padding_ != EXPLICIT,
errors::Unimplemented(
@@ -579,10 +613,12 @@ class MaxPoolGradGradOp : public XlaOpKernel {
errors::InvalidArgument("Sliding window ksize field must "
"specify ",
num_dims(), " dimensions"));
+ OP_REQUIRES_OK(ctx, ValidateKernelSizes(ksize_));
OP_REQUIRES(ctx, stride_.size() == num_dims(),
errors::InvalidArgument("Sliding window strides field must "
"specify ",
num_dims(), " dimensions"));
+ OP_REQUIRES_OK(ctx, ValidateStrides(stride_));

const TensorShape tensor_in_shape = ctx->InputShape(0);
const TensorShape tensor_out_shape = ctx->InputShape(1);
diff --git a/tensorflow/compiler/xla/client/padding.cc b/tensorflow/compiler/xla/client/padding.cc
index b9c1ce25b00..919434cb1f0 100644
--- a/tensorflow/compiler/xla/client/padding.cc
+++ b/tensorflow/compiler/xla/client/padding.cc
@@ -35,6 +35,16 @@ Status ValidatePaddingValues(absl::Span<const int64_t> input_dimensions,
input_dimensions.size(), window_dimensions.size(),
window_strides.size());
}
+ for (size_t i = 0; i < input_dimensions.size(); ++i) {
+ if (window_dimensions[i] <= 0) {
+ return InvalidArgument("Window dimension %u has non-positive size %d", i,
+ window_dimensions[i]);
+ }
+ if (window_strides[i] <= 0) {
+ return InvalidArgument("Window dimension %u has non-positive stride %d",
+ i, window_strides[i]);
+ }
+ }
return OkStatus();
}
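
The `padding.cc` hunk applies the same non-positive checks one layer down, in the XLA client's `ValidatePaddingValues`, so window dimensions and strides that reach XLA through a path other than the tf2xla pooling kernels are rejected as well.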

New patch file (+121 lines):
Based on upstream commit 8ae76cf085f4be26295d2ecf2081e759e04b8acf, with minor adjustments so it applies to 2.10.1.
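
A sketch of the failure mode this patch guards against, mirroring the new `bincount_op_test.py` below: a rank-0 `arr` combined with a mismatched `weights` tensor. As the `bincount_op.cc` hunk shows, the XLA kernel previously read `input_shape.dimensions(0)` before the shape checks ran, which is invalid for a rank-0 input; the fix moves the size lookup and the empty-input early return below the rank and weights validation, so this call now fails cleanly with `InvalidArgumentError`. As above, the patched path is the XLA kernel; in plain eager execution the standard kernel performs a comparable shape check.

```python
import tensorflow as tf
from tensorflow.python.ops import gen_math_ops

try:
    # Scalar input with a length-2 weights vector: shapes do not match.
    gen_math_ops.bincount(arr=6, size=804, weights=[52, 351])
except tf.errors.InvalidArgumentError as e:
    print(e)  # "`weights` must be the same shape as `arr` or a length-0 `Tensor` ..."
```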

diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD
index d148d12a7d1..432e3891e77 100644
--- a/tensorflow/compiler/tests/BUILD
+++ b/tensorflow/compiler/tests/BUILD
@@ -2344,3 +2344,19 @@ tf_xla_py_test(
"//tensorflow/python:training",
],
)
+
+tf_xla_py_test(
+ name = "bincount_op_test",
+ size = "small",
+ srcs = ["bincount_op_test.py"],
+ enable_mlir_bridge = False,
+ python_version = "PY3",
+ shard_count = 10,
+ tags = [
+ "no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
+ ],
+ deps = [
+ ":xla_test",
+ "//tensorflow/python:platform_test",
+ ],
+)
diff --git a/tensorflow/compiler/tests/bincount_op_test.py b/tensorflow/compiler/tests/bincount_op_test.py
new file mode 100644
index 00000000000..79e8a7e91b8
--- /dev/null
+++ b/tensorflow/compiler/tests/bincount_op_test.py
@@ -0,0 +1,40 @@
+# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for bincount using the XLA JIT."""
+from tensorflow.compiler.tests import xla_test
+from tensorflow.python.framework import errors
+from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.platform import googletest
+
+
+class BincountTest(xla_test.XLATestCase):
+
+ def testInputRank0(self):
+ with self.session():
+ with self.test_scope():
+ bincount = gen_math_ops.bincount(arr=6, size=804, weights=[52, 351])
+
+ with self.assertRaisesRegex(
+ errors.InvalidArgumentError,
+ (
+ "`weights` must be the same shape as `arr` or a length-0"
+ " `Tensor`, in which case it acts as all weights equal to 1."
+ ),
+ ):
+ self.evaluate(bincount)
+
+
+if __name__ == "__main__":
+ googletest.main()
diff --git a/tensorflow/compiler/tf2xla/kernels/bincount_op.cc b/tensorflow/compiler/tf2xla/kernels/bincount_op.cc
index 2d4bba26239..42b37d2e0b2 100644
--- a/tensorflow/compiler/tf2xla/kernels/bincount_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/bincount_op.cc
@@ -62,21 +62,15 @@ class DenseBincountOp : public XlaOpKernel {
StatusOr<xla::Shape> input_shape_or = ctx->builder()->GetShape(input);
OP_REQUIRES_OK(ctx, input_shape_or.status());
auto input_shape = input_shape_or.ValueOrDie();
- auto size = input_shape.dimensions(0);

- if (!size) {
- output = xla::Broadcast(zero, {output_size});
- ctx->SetOutput(0, output);
- return;
- }
auto rank = input_shape.rank();

OP_REQUIRES(ctx, rank <= 2,
errors::InvalidArgument(
"Shape must be at most rank 2 but is rank ", rank));
-
xla::XlaOp weights = ctx->Input(2);
StatusOr<xla::Shape> weights_shape_or = ctx->builder()->GetShape(weights);
+
OP_REQUIRES_OK(ctx, weights_shape_or.status());

auto weights_shape = weights_shape_or.ValueOrDie();
@@ -91,11 +85,20 @@ class DenseBincountOp : public XlaOpKernel {
"1. Received ",
weights_shape.DebugString()));

+ auto size = input_shape.dimensions(0);
+
+ if (!size) {
+ output = xla::Broadcast(zero, {output_size});
+ ctx->SetOutput(0, output);
+ return;
+ }
+
auto weights_size = weights_shape.dimensions(0);
bool has_weights = false;
if (weights_size) {
has_weights = true;
}
+
xla::Shape output_shape = xla::ShapeUtil::MakeShape(dtype, {output_size});
xla::ScatterDimensionNumbers scatter_dnums;
scatter_dnums.set_index_vector_dim(1);