diff --git a/build_tools/autogen_ltc_backend.yaml b/build_tools/autogen_ltc_backend.yaml
index 37115fa98e4..b8a8905e06c 100644
--- a/build_tools/autogen_ltc_backend.yaml
+++ b/build_tools/autogen_ltc_backend.yaml
@@ -1,13 +1,14 @@
 blacklist:
 # List of unsupported ops in LTC autogen because of some error
+- _index_put_impl_ # Error: TODO not sure if there are other valid types to handle here
 - empty_like # Error: TODO add support for type BaseType(name=)
 - index.Tensor # Error: TODO not sure if there are other valid types to handle here
 - index_put # Error: TODO not sure if there are other valid types to handle here
 - index_put_ # Error: TODO not sure if there are other valid types to handle here
-- _index_put_impl_ # Error: TODO not sure if there are other valid types to handle here
 - stack # Error: TODO not sure if there are other valid types to handle here
 
 # Additional ops which autogen is supported for but don't compile yet
+- _convolution
 - detach
 - item
 - size
diff --git a/e2e_testing/torchscript/xfail_sets.py b/e2e_testing/torchscript/xfail_sets.py
index 3d5d44aaf43..513145b4263 100644
--- a/e2e_testing/torchscript/xfail_sets.py
+++ b/e2e_testing/torchscript/xfail_sets.py
@@ -175,6 +175,11 @@
 }
 
 LTC_XFAIL_SET = {
+    "_Convolution2DAllFalseModule_basic",
+    "_Convolution2DBenchmarkModule_basic",
+    "_Convolution2DCudnnModule_basic",
+    "_Convolution2DDeterministicModule_basic",
+    "_Convolution2DTF32Module_basic",
     "AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule_basic",
     "AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic",
     "AddIntModule_basic",
diff --git a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
index f9230e36085..a9bba698ea2 100644
--- a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
+++ b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -5293,32 +5293,6 @@ def Torch_AtenSelectIntOp : Torch_Op<"aten.select.int", [
   }];
 }
 
-def Torch_AtenSelectScatterOp : Torch_Op<"aten.select_scatter", [
-    AllowsTypeRefinement,
-    HasValueSemantics,
-    ReadOnly
-  ]> {
-  let summary = "Generated op for `aten::select_scatter : (Tensor, Tensor, int, int) -> (Tensor)`";
-  let arguments = (ins
-    AnyTorchTensorType:$self,
-    AnyTorchTensorType:$src,
-    Torch_IntType:$dim,
-    Torch_IntType:$index
-  );
-  let results = (outs
-    AnyTorchTensorType:$result
-  );
-  let hasCustomAssemblyFormat = 1;
-  let extraClassDefinition = [{
-    ParseResult AtenSelectScatterOp::parse(OpAsmParser &parser, OperationState &result) {
-      return parseDefaultTorchOp(parser, result, 4, 1);
-    }
-    void AtenSelectScatterOp::print(OpAsmPrinter &printer) {
-      printDefaultTorchOp(printer, *this, 4, 1);
-    }
-  }];
-}
-
 def Torch_AtenSizeIntOp : Torch_Op<"aten.size.int", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -5774,34 +5748,6 @@ def Torch_AtenSliceTensorOp : Torch_Op<"aten.slice.Tensor", [
   }];
 }
 
-def Torch_AtenSliceScatterOp : Torch_Op<"aten.slice_scatter", [
-    AllowsTypeRefinement,
-    HasValueSemantics,
-    ReadOnly
-  ]> {
-  let summary = "Generated op for `aten::slice_scatter : (Tensor, Tensor, int, int?, int?, int) -> (Tensor)`";
-  let arguments = (ins
-    AnyTorchTensorType:$self,
-    AnyTorchTensorType:$src,
-    Torch_IntType:$dim,
-    AnyTorchOptionalIntType:$start,
-    AnyTorchOptionalIntType:$end,
-    Torch_IntType:$step
-  );
-  let results = (outs
-    AnyTorchTensorType:$result
-  );
-  let hasCustomAssemblyFormat = 1;
-  let extraClassDefinition = [{
-    ParseResult AtenSliceScatterOp::parse(OpAsmParser &parser, OperationState &result) {
-      return parseDefaultTorchOp(parser, result, 6, 1);
-    }
-    void AtenSliceScatterOp::print(OpAsmPrinter &printer) {
-      printDefaultTorchOp(printer, *this, 6, 1);
-    }
-  }];
-}
-
 def Torch_AtenLenTensorOp : Torch_Op<"aten.len.Tensor", [
     AllowsTypeRefinement,
     HasValueSemantics,
diff --git a/python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py b/python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
index c3722bb355e..8ccba234e29 100644
--- a/python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
+++ b/python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
@@ -437,7 +437,6 @@ def emit_with_mutating_variants(key, **kwargs):
     emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)")
     emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)")
     emit("aten::select.int : (Tensor, int, int) -> (Tensor)")
-    emit("aten::select_scatter : (Tensor, Tensor, int, int) -> (Tensor)")
     emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True)
     emit("aten::stack : (Tensor[], int) -> (Tensor)")
     emit("aten::sum : (Tensor, int?) -> (Tensor)")
@@ -456,7 +455,6 @@ def emit_with_mutating_variants(key, **kwargs):
     emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
     emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
     emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
-    emit("aten::slice_scatter : (Tensor, Tensor, int, int?, int?, int) -> (Tensor)")
     emit("aten::len.Tensor : (Tensor) -> (int)")
     emit("aten::cpu : (Tensor) -> (Tensor)")
     emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")
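
Note (not part of the patch): the "_Convolution2D*Module_basic" tests added to LTC_XFAIL_SET exercise the raw aten::_convolution overload that this change also blacklists for LTC autogen. Below is a minimal sketch of such a call, assuming the 13-argument _convolution signature whose trailing booleans are the benchmark / deterministic / cudnn_enabled / allow_tf32 flags those tests toggle; the tensor shapes are illustrative only and not taken from the test suite.

import torch

# Illustrative aten::_convolution call (shapes made up for the example).
x = torch.randn(1, 3, 8, 8)   # NCHW input
w = torch.randn(4, 3, 3, 3)   # out_channels x in_channels x kH x kW
y = torch.ops.aten._convolution(
    x, w, None,                   # input, weight, bias
    [1, 1], [0, 0], [1, 1],       # stride, padding, dilation
    False, [0, 0], 1,             # transposed, output_padding, groups
    False, False, False, False)   # benchmark, deterministic, cudnn_enabled, allow_tf32
print(y.shape)  # torch.Size([1, 4, 6, 6])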