diff --git a/codegen/xla_native_functions.yaml b/codegen/xla_native_functions.yaml
index aa638f93aea..199025dc7e1 100644
--- a/codegen/xla_native_functions.yaml
+++ b/codegen/xla_native_functions.yaml
@@ -380,7 +380,6 @@ supported:
 - narrow_copy
 - pixel_shuffle
 - pixel_unshuffle
-- reshape
 - select_backward
 - select.int
 - slice.Tensor
@@ -413,8 +412,6 @@ symint:
 - narrow_copy
 - select_backward
 - select.int
-# See Note: [functionalization and CompositeExplicitAutograd]
-- reshape
 # See Note: [Disabling functionalization]
 - expand
 - view
diff --git a/torch_xla/csrc/aten_xla_type.cpp b/torch_xla/csrc/aten_xla_type.cpp
index 65eacaedcb2..be1679482ce 100644
--- a/torch_xla/csrc/aten_xla_type.cpp
+++ b/torch_xla/csrc/aten_xla_type.cpp
@@ -3653,16 +3653,6 @@ at::Tensor XLANativeFunctions::pixel_unshuffle(const at::Tensor& self,
       pixel_unshuffle)>::call(self, downscale_factor);
 }
 
-at::Tensor XLANativeFunctions::reshape_symint(const at::Tensor& self,
-                                              c10::SymIntArrayRef shape) {
-  // See Note: [Disabling functionalization]
-  if (runtime::sys_util::GetEnvBool("XLA_DISABLE_FUNCTIONALIZATION", false)) {
-    return at::native::reshape_symint(self, shape);
-  }
-  return at::functionalization::functionalize_aten_op_symint<ATEN_OP(
-      reshape)>::call(self, shape);
-}
-
 at::Tensor XLANativeFunctions::select_backward_symint(
     const at::Tensor& grad_output, c10::SymIntArrayRef input_sizes, int64_t dim,
     c10::SymInt index) {