Skip to content

Commit

Permalink
Rename ConvLayoutAttr to ConvolutionLayout
Browse files (browse the repository at this point in the history)
LPanosTT committed Oct 28, 2024
1 parent 7c48056 commit 4f05514
Show file tree
Hide file tree
Showing 7 changed files with 44 additions and 51 deletions.
23 changes: 6 additions & 17 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
Original file line number Diff line number Diff line change
Expand Up @@ -573,16 +573,16 @@ def TTIR_ConvolutionOp : TTIR_DPSOp<"convolution"> {
Optional<AnyRankedTensor>:$bias,
AnyRankedTensor:$output,
// Default value: one for each of the spatial dimension.
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "std::vector<int64_t>(getConvLayout().getInputSpatialDimensions().size(), 1)">:$window_strides,
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "SmallVector<int64_t>(getConvolutionLayout().getInputSpatialDimensions().size(), 1)">:$window_strides,
// Default value: two zeros for each of the spatial dimension.
DefaultValuedOptionalAttr<I64ElementsAttr, "std::vector<int64_t>(getConvLayout().getInputSpatialDimensions().size()*2, 0)">:$padding,
DefaultValuedOptionalAttr<I64ElementsAttr, "SmallVector<int64_t>(getConvolutionLayout().getInputSpatialDimensions().size()*2, 0)">:$padding,
// Default value: one for each of the spatial dimension.
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "std::vector<int64_t>(getConvLayout().getInputSpatialDimensions().size(), 1)">:$input_dilation,
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "SmallVector<int64_t>(getConvolutionLayout().getInputSpatialDimensions().size(), 1)">:$input_dilation,
// Default value: one for each of the spatial dimension.
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "std::vector<int64_t>(getConvLayout().getInputSpatialDimensions().size(), 1)">:$weight_dilation,
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "SmallVector<int64_t>(getConvolutionLayout().getInputSpatialDimensions().size(), 1)">:$weight_dilation,
// Default value: false for each of the spatial dimension.
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "ConvolutionOp::getDefaultWindowReversal(getConvLayout())">:$window_reversal,
TTIR_ConvLayoutAttr:$conv_layout,
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "SmallVector<bool>(getConvolutionLayout().getInputSpatialDimensions().size(), false)">:$window_reversal,
TTIR_ConvolutionLayoutAttr:$convolution_layout,
ConfinedAttr<I64Attr, [IntPositive]>:$feature_group_count,
ConfinedAttr<I64Attr, [IntPositive]>:$batch_group_count,
TT_OperandConstraintArrayAttr:$operand_constraints
Expand All @@ -593,17 +593,6 @@ def TTIR_ConvolutionOp : TTIR_DPSOp<"convolution"> {

let extraClassDeclaration = [{
MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }

static ArrayRef<bool> getDefaultWindowReversal(const ConvLayoutAttr &convLayout) {
static bool boolArray[1000];

assert (convLayout.getInputSpatialDimensions().size() < 100 && "Too many spatial dimensions");
for (uint32_t i = 0; i < convLayout.getInputSpatialDimensions().size(); i++) {
boolArray[i] = false;
}

return ArrayRef<bool>(boolArray, convLayout.getInputSpatialDimensions().size());
}
}];
}

Expand Down
6 changes: 3 additions & 3 deletions include/ttmlir/Dialect/TTIR/IR/TTIROpsAttrs.td
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,9 @@
include "mlir/IR/AttrTypeBase.td"
include "ttmlir/Dialect/TTIR/IR/TTIRBase.td"

def TTIR_ConvLayoutAttr : AttrDef<TTIR_Dialect, "ConvLayout", [], "::mlir::Attribute"> {
let mnemonic = "conv";
let summary = "Structure of dimension information for conv op";
def TTIR_ConvolutionLayoutAttr : AttrDef<TTIR_Dialect, "ConvolutionLayout", [], "::mlir::Attribute"> {
let mnemonic = "convolution_layout";
let summary = "Structure of dimension information for convolution op";
let description = [{
Holds the layout information for the input activation, weights, and output.
}];
Expand Down
2 changes: 1 addition & 1 deletion lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -380,7 +380,7 @@ class StableHLOToTTIRConvolutionOpConversionPattern
mlir::Value(nullptr), outputTensor, adaptor.getWindowStridesAttr(),
adaptor.getPaddingAttr(), adaptor.getLhsDilationAttr(),
adaptor.getRhsDilationAttr(), adaptor.getWindowReversalAttr(),
mlir::tt::ttir::ConvLayoutAttr::get(
mlir::tt::ttir::ConvolutionLayoutAttr::get(
getContext(), dimNums.getInputBatchDimension(),
dimNums.getInputFeatureDimension(),
dimNums.getInputSpatialDimensions(),
Expand Down
20 changes: 12 additions & 8 deletions lib/Dialect/TTIR/IR/TTIROps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,29 +62,33 @@ ::mlir::LogicalResult mlir::tt::ttir::Conv2dOp::verify() {
return success();
}

//===----------------------------------------------------------------------===//
// ConvolutionOp
//===----------------------------------------------------------------------===//

::mlir::LogicalResult mlir::tt::ttir::ConvolutionOp::verify() {
if (getConvLayout().getInputSpatialDimensions().size() !=
getConvLayout().getOutputSpatialDimensions().size()) {
if (getConvolutionLayout().getInputSpatialDimensions().size() !=
getConvolutionLayout().getOutputSpatialDimensions().size()) {
return emitOpError("Convolution input, output, and kernel must have the "
"same number of spatial dimensions");
}
if (getConvLayout().getInputSpatialDimensions().size() !=
getConvLayout().getKernelSpatialDimensions().size()) {
if (getConvolutionLayout().getInputSpatialDimensions().size() !=
getConvolutionLayout().getKernelSpatialDimensions().size()) {
return emitOpError("Convolution input, output, and kernel must have the "
"same number of spatial dimensions");
}

// Subtract 2 from the rank as to not count batch and feature dimension
if (getInput().getType().getRank() - 2 !=
(int64_t)getConvLayout().getInputSpatialDimensions().size()) {
(int64_t)getConvolutionLayout().getInputSpatialDimensions().size()) {
return emitOpError("Input tensor must have the same number of spatial "
"dimensions as specified in the ConvLayout");
"dimensions as specified in the ConvolutionLayout");
}

if (getWeight().getType().getRank() - 2 !=
(int64_t)getConvLayout().getKernelSpatialDimensions().size()) {
(int64_t)getConvolutionLayout().getKernelSpatialDimensions().size()) {
return emitOpError("Weight tensor must have the same number of spatial "
"dimensions as specified in the ConvLayout");
"dimensions as specified in the ConvolutionLayout");
}

std::optional<::mlir::RankedTensorType> biasType =
Expand Down
39 changes: 21 additions & 18 deletions lib/Dialect/TTIR/Transforms/Transforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -223,13 +223,14 @@ static std::vector<TransposeDims> generateKernelTransposeIndices(
std::vector<int64_t> kernel_layout(
ttnn_convolution_kernel_layout.size(),
ConvolutionKernelDimension::INVALID_KERNEL_DIM);
kernel_layout[op.getConvLayout().getKernelOutputFeatureDimension()] =
kernel_layout[op.getConvolutionLayout().getKernelOutputFeatureDimension()] =
ConvolutionKernelDimension::OUTPUT_FEATURES;
kernel_layout[op.getConvLayout().getKernelInputFeatureDimension()] =
kernel_layout[op.getConvolutionLayout().getKernelInputFeatureDimension()] =
ConvolutionKernelDimension::INPUT_FEATURES;

int64_t spatial_count = 0;
for (int64_t spatial_dim : op.getConvLayout().getKernelSpatialDimensions()) {
for (int64_t spatial_dim :
op.getConvolutionLayout().getKernelSpatialDimensions()) {
kernel_layout[spatial_dim] = spatial_count;
spatial_count++;
}
Expand All @@ -256,13 +257,14 @@ static std::vector<TransposeDims> generateInputTransposeIndices(

std::vector<int64_t> input_layout(ttnn_convolution_layout.size(),
ConvolutionDimension::INVALID_DIM);
input_layout[op.getConvLayout().getInputBatchDimension()] =
input_layout[op.getConvolutionLayout().getInputBatchDimension()] =
ConvolutionDimension::BATCH;
input_layout[op.getConvLayout().getInputFeatureDimension()] =
input_layout[op.getConvolutionLayout().getInputFeatureDimension()] =
ConvolutionDimension::FEATURE;

int64_t spatial_count = 0;
for (int64_t spatial_dim : op.getConvLayout().getInputSpatialDimensions()) {
for (int64_t spatial_dim :
op.getConvolutionLayout().getInputSpatialDimensions()) {
input_layout[spatial_dim] = spatial_count;
spatial_count++;
}
Expand Down Expand Up @@ -295,13 +297,14 @@ static std::vector<TransposeDims> generateOutputTransposeIndices(

std::vector<int64_t> desired_output_layout(ttnn_convolution_layout.size(),
ConvolutionDimension::INVALID_DIM);
desired_output_layout[op.getConvLayout().getOutputBatchDimension()] =
desired_output_layout[op.getConvolutionLayout().getOutputBatchDimension()] =
ConvolutionDimension::BATCH;
desired_output_layout[op.getConvLayout().getOutputFeatureDimension()] =
desired_output_layout[op.getConvolutionLayout().getOutputFeatureDimension()] =
ConvolutionDimension::FEATURE;

int64_t spatial_count = 0;
for (int64_t spatial_dim : op.getConvLayout().getOutputSpatialDimensions()) {
for (int64_t spatial_dim :
op.getConvolutionLayout().getOutputSpatialDimensions()) {
desired_output_layout[spatial_dim] = spatial_count;
spatial_count++;
}
Expand Down Expand Up @@ -351,16 +354,16 @@ class ConvolutionToConv2dPatternRewriter

// Conv2d will have 2 spatial dimensions

assert(op.getConvLayout().getInputSpatialDimensions().size() ==
op.getConvLayout().getOutputSpatialDimensions().size() &&
assert(op.getConvolutionLayout().getInputSpatialDimensions().size() ==
op.getConvolutionLayout().getOutputSpatialDimensions().size() &&
"Convolution input, output, and kernel must have the same number of "
"spatial dimensions");
assert(op.getConvLayout().getInputSpatialDimensions().size() ==
op.getConvLayout().getKernelSpatialDimensions().size() &&
assert(op.getConvolutionLayout().getInputSpatialDimensions().size() ==
op.getConvolutionLayout().getKernelSpatialDimensions().size() &&
"Convolution input, output, and kernel must have the same number of "
"spatial dimensions");

if (op.getConvLayout().getInputSpatialDimensions().size() != 2) {
if (op.getConvolutionLayout().getInputSpatialDimensions().size() != 2) {
return failure();
}

Expand Down Expand Up @@ -409,10 +412,10 @@ class ConvolutionToConv2dPatternRewriter

auto output_shape = op.getResult().getType().getShape().vec();
std::vector<int64_t> new_output_shape = {
output_shape[op.getConvLayout().getOutputBatchDimension()],
output_shape[op.getConvLayout().getOutputSpatialDimensions()[0]],
output_shape[op.getConvLayout().getOutputSpatialDimensions()[1]],
output_shape[op.getConvLayout().getOutputFeatureDimension()]};
output_shape[op.getConvolutionLayout().getOutputBatchDimension()],
output_shape[op.getConvolutionLayout().getOutputSpatialDimensions()[0]],
output_shape[op.getConvolutionLayout().getOutputSpatialDimensions()[1]],
output_shape[op.getConvolutionLayout().getOutputFeatureDimension()]};

auto inputType = mlir::cast<RankedTensorType>(op.getInput().getType());
auto outputType =
Expand Down
3 changes: 0 additions & 3 deletions test/ttmlir/Conversion/StableHLOToTTIR/conv2d_op.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,6 @@ module @jit_convolution attributes {} {
window = {
stride = [1, 1],
pad = [[1, 1], [1, 1]],
lhs_dilate = [1, 1],
rhs_dilate = [1, 1],
reverse = [0, 0]
} {
feature_group_count = 1 : i64,
batch_group_count = 1 : i64,
Expand Down
2 changes: 1 addition & 1 deletion test/ttmlir/Dialect/TTNN/complex_conv_channel_first.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ module @jit_convolution {
// CHECK: %[[C:.*]] = "ttnn.conv2d"[[C:.*]]
%1 = "ttir.convolution"(%arg0, %arg1, %0) <{
batch_group_count = 1 : i64,
conv_layout = #ttir<conv
convolution_layout = #ttir<convolution_layout
input_batch = 0,
input_feature = 1,
input_spatial_dimensions = 2x3,
Expand Down

0 comments on commit 4f05514

Please sign in to comment.