Removing ttnn::TensorMemoryLayout::None (#1502)
1.	Removed `None` from `ttnn::TensorMemoryLayout`:
	•	Going forward, when creating a tensor encoding attribute, you can set `TensorMemoryLayoutAttr` to `nullptr` to represent tensors on the host (see the sketch after this list).
2.	Removed some duplicate methods from `TTNNLayout`:
	•	Removed duplicate methods such as `isSystemBufferType`, `isDeviceBufferType`, and `isL1BufferType`.
3.	`MemoryConfigAttr` Updates:
	•	`TensorMemoryLayout` is now an optional parameter. This change addresses cases where tensors are transferred from a device to the host using a layout operation; in these scenarios, the attribute can be `nullptr`.
	•	This happens during the conversion pass from `TTIR` to `TTNN` in `ToLayoutOp`. There appears to be a growing need for a new composite op in `TTNN` to be used instead of `ToLayoutOp`.
4.	`TTNNLayout` Updates:
	•	`TensorMemoryLayout` is now an optional parameter. For tensors on the host, this attribute can be set to `nullptr`.
5.	Python API/CAPI adjustments:
	•	When constructing `TTNNLayoutAttr` or `MemoryConfigAttr` via the Python API/CAPI, `TensorMemoryLayout` defaults to `nullptr` if not provided.
	•	Updated the `memory_layout_as_int` getter to raise an exception if `TensorMemoryLayout` is not set.
6.	`TTNNWorkarounds`:
	•	The `TensorMemoryLayout` workaround is now optional, to handle cases where the tensor is on the host. Elsewhere this is handled by creating a `TensorMemoryLayoutAttr` and setting it to `nullptr` for host tensors, but that is not an option for workarounds: it would introduce a dependency on `MlirContext`, which a simple workaround should not need.
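As a concrete illustration of the new convention, here is a minimal sketch (not taken from this diff) of building tensor encodings under the updated API. It assumes the `ttnn` dialect headers from this repo; the helper names and argument plumbing are illustrative only.

```cpp
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h"

using namespace mlir::tt::ttnn;

// Host tensor: pass a null TensorMemoryLayoutAttr instead of the removed
// TensorMemoryLayout::None enum case.
TTNNLayoutAttr makeHostLayout(mlir::MLIRContext *ctx, mlir::AffineMap linear,
                              mlir::tt::GridAttr grid,
                              mlir::MemRefType memref) {
  return TTNNLayoutAttr::get(ctx, linear, grid, memref,
                             /*memLayout=*/TensorMemoryLayoutAttr{});
}

// Device tensor: the memory layout is still carried as a real attribute.
TTNNLayoutAttr makeDeviceLayout(mlir::MLIRContext *ctx, mlir::AffineMap linear,
                                mlir::tt::GridAttr grid,
                                mlir::MemRefType memref) {
  auto memLayout =
      TensorMemoryLayoutAttr::get(ctx, TensorMemoryLayout::Interleaved);
  return TTNNLayoutAttr::get(ctx, linear, grid, memref, memLayout);
}
```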
mtopalovicTT authored Dec 6, 2024
1 parent fa326aa commit 3578538
Showing 50 changed files with 346 additions and 297 deletions.
20 changes: 14 additions & 6 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h
@@ -14,21 +14,29 @@

namespace mlir::tt::ttnn {

inline bool isSystemBufferType(mlir::tt::ttnn::BufferType bufferType) {
return bufferType == mlir::tt::ttnn::BufferType::SystemMemory;
inline bool isSystemBufferType(BufferType bufferType) {
return bufferType == BufferType::SystemMemory;
}

inline bool isDeviceBufferType(mlir::tt::ttnn::BufferType bufferType) {
return bufferType == mlir::tt::ttnn::BufferType::L1 ||
bufferType == mlir::tt::ttnn::BufferType::DRAM ||
bufferType == mlir::tt::ttnn::BufferType::L1Small;
inline bool isDeviceBufferType(BufferType bufferType) {
return bufferType == BufferType::L1 || bufferType == BufferType::DRAM ||
bufferType == BufferType::L1Small;
}

inline bool isL1BufferType(BufferType bufferType) {
return bufferType == BufferType::L1;
}

inline bool isDRAMBufferType(BufferType bufferType) {
return bufferType == BufferType::DRAM;
}

inline bool isShardedMemoryLayout(TensorMemoryLayout layout) {
return layout == TensorMemoryLayout::HeightSharded ||
layout == TensorMemoryLayout::WidthSharded ||
layout == TensorMemoryLayout::BlockSharded;
}

} // namespace mlir::tt::ttnn

#define GET_ATTRDEF_CLASSES
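The now free-standing predicates compose naturally with the optional memory layout. A hedged usage sketch (the helper below is illustrative and not part of this commit):

```cpp
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h"

namespace mlir::tt::ttnn {

// Illustrative: true only for device tensors whose memory layout is sharded.
// A null mem_layout (host tensor) safely reads as "not sharded".
inline bool isShardedDeviceLayout(TTNNLayoutAttr layout) {
  if (!isDeviceBufferType(layout.getBufferType())) {
    return false;
  }
  TensorMemoryLayoutAttr memLayout = layout.getMemLayout();
  return memLayout && isShardedMemoryLayout(memLayout.getValue());
}

} // namespace mlir::tt::ttnn
```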
77 changes: 40 additions & 37 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.td
@@ -81,9 +81,9 @@ def TTNN_MemoryConfigAttr : TTNN_Attr<"MemoryConfig", "memory_config"> {
TTNN memory config attribute
}];

let parameters = (ins AttrParameter<"TensorMemoryLayoutAttr", "">:$tensorMemoryLayout,
AttrParameter<"BufferTypeAttr", "">:$bufferType,
AttrParameter<"ShardSpecAttr", "">:$shardSpec);
let parameters = (ins AttrParameter<"BufferTypeAttr", "">:$bufferType,
AttrParameter<"ShardSpecAttr", "">:$shardSpec,
OptionalParameter<"TensorMemoryLayoutAttr">:$tensorMemoryLayout);

let assemblyFormat = "`<` params `>`";

@@ -124,48 +124,51 @@ def TTNN_TTNNLayoutAttr: TTNN_Attr<"TTNNLayout", "ttnn_layout"> {
let parameters = (ins AttrParameter<"AffineMap", "An affine map that defines how the logical tensor dimensions map to a grid shape.">:$linear,
AttrParameter<"GridAttr", "The grid shape that this tensor is divided onto.">:$grid,
AttrParameter<"MemRefType", "A memref that describes the physical footprint allocation of the shard. It must also have a shape with rank equal to grid.">:$memref,
DefaultValuedParameter<"TensorMemoryLayout", "TensorMemoryLayout::None", "The layout of the tensor in memory.">:$mem_layout);
OptionalParameter<"TensorMemoryLayoutAttr", "TTNN tensor memory layout">:$mem_layout);
let assemblyFormat = "`<` $linear`,` $grid`,` $memref (`,` $mem_layout^)? `>`";
let extraClassDeclaration = [{
static TTNNLayoutAttr get(::mlir::MLIRContext *context,
ArrayRef<int64_t> tensorShape,
Type elementType,
BufferType bufferType,
GridAttr grid,
TensorMemoryLayout memoryLayout,
TensorMemoryLayoutAttr memoryLayoutAttr = nullptr,
ArrayRef<std::pair<std::int64_t, std::int64_t>> collapseIntervals = {{0, -1}});
uint64_t getShardSizeInBytes() const;
BufferType getBufferType() const;
TTNNLayoutAttr withGrid(::mlir::MLIRContext *context, ArrayRef<int64_t> tensorShape, GridAttr grid, ArrayRef<std::pair<std::int64_t, std::int64_t>> collapseIntervals = {{0, -1}});
TTNNLayoutAttr withGrid(::mlir::MLIRContext *context,
RankedTensorType ty,
GridAttr grid,
ArrayRef<std::pair<std::int64_t, std::int64_t>> collapseIntervals = {{0, -1}});
TTNNLayoutAttr withElementType(::mlir::MLIRContext *context, Type elementType);
TTNNLayoutAttr withBufferType(::mlir::MLIRContext *context, BufferType bufferType);
TTNNLayoutAttr withMemoryLayout(::mlir::MLIRContext *context, TensorMemoryLayout memLayout);
TTNNLayoutAttr withShardShape(::mlir::MLIRContext *context, llvm::SmallVector<int64_t> shardShape);

bool isSystemBufferType() const { return ::mlir::tt::ttnn::isSystemBufferType(getBufferType()); }
bool isDeviceBufferType() const { return ::mlir::tt::ttnn::isDeviceBufferType(getBufferType()); }
bool hasShardedTensorMemoryLayout() const;
bool hasShardedL1TensorMemoryLayout() const;
bool hasInterleavedL1TensorMemoryLayout() const;
bool hasInterleavedDRAMTensorMemoryLayout() const;
bool hasL1BufferType() const;
bool hasDRAMBufferType() const;
bool isTiled() const;
Layout getLayout() const;
Type getElementType() const;
DataType getDataType() const;
uint64_t getElementSizeBytes() const;
int64_t getTensorSizeInBytes(ArrayRef<int64_t> tensorShape, ::mlir::tt::DeviceAttr device) const;
llvm::SmallVector<int64_t> getStride(ArrayRef<int64_t> logicalShape) const;
llvm::SmallVector<int64_t> getShardShape() const;
llvm::SmallVector<int64_t> getScalarShardShape() const;
AffineMap replaceMemoryMapSymbolsWithShardShape(AffineMap physicalMemoryMap) const;
AffineMap getIdentityTileLinearMap() const;
llvm::SmallVector<int64_t> getTiledShape(ArrayRef<int64_t> logicalTensorShape) const;

TTNNLayoutAttr withGrid(::mlir::MLIRContext *context, ArrayRef<int64_t> tensorShape, GridAttr grid, ArrayRef<std::pair<std::int64_t, std::int64_t>> collapseIntervals = {{0, -1}});
TTNNLayoutAttr withGrid(::mlir::MLIRContext *context,
RankedTensorType ty,
GridAttr grid,
ArrayRef<std::pair<std::int64_t, std::int64_t>> collapseIntervals = {{0, -1}});
TTNNLayoutAttr withElementType(::mlir::MLIRContext *context, Type elementType);
TTNNLayoutAttr withBufferType(::mlir::MLIRContext *context, BufferType bufferType);
TTNNLayoutAttr withMemoryLayout(::mlir::MLIRContext *context, TensorMemoryLayoutAttr memLayoutAttr);
TTNNLayoutAttr withMemoryLayout(::mlir::MLIRContext *context, TensorMemoryLayout memLayout);
TTNNLayoutAttr withShardShape(::mlir::MLIRContext *context, llvm::SmallVector<int64_t> shardShape);

bool isSystemBufferType() const { return ::mlir::tt::ttnn::isSystemBufferType(getBufferType()); }
bool isDeviceBufferType() const { return ::mlir::tt::ttnn::isDeviceBufferType(getBufferType()); }
bool isTiled() const;
bool hasShardedTensorMemoryLayout() const;
bool hasShardedL1TensorMemoryLayout() const;
bool hasInterleavedL1TensorMemoryLayout() const;
bool hasInterleavedDRAMTensorMemoryLayout() const;
bool hasDRAMBufferType() const;
bool hasL1BufferType() const;
Layout getLayout() const;
std::optional<TensorMemoryLayout> getMemLayoutOpt() const;
Type getElementType() const;
uint64_t getShardSizeInBytes() const;
BufferType getBufferType() const;
DataType getDataType() const;
uint64_t getElementSizeBytes() const;
int64_t getTensorSizeInBytes(ArrayRef<int64_t> tensorShape, ::mlir::tt::DeviceAttr device) const;
llvm::SmallVector<int64_t> getStride(ArrayRef<int64_t> logicalShape) const;
llvm::SmallVector<int64_t> getShardShape() const;
llvm::SmallVector<int64_t> getScalarShardShape() const;
AffineMap getIdentityTileLinearMap() const;
llvm::SmallVector<int64_t> getTiledShape(ArrayRef<int64_t> logicalTensorShape) const;
AffineMap replaceMemoryMapSymbolsWithShardShape(AffineMap physicalMemoryMap) const;
}];
}

2 changes: 0 additions & 2 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOpsEnums.td
@@ -21,7 +21,6 @@ def TTNN_Layout : I32EnumAttr<"Layout", "TTNN Layout",
let cppNamespace = "::mlir::tt::ttnn";
}

def TTNN_TensorMemoryLayout_None : I32EnumAttrCase<"None", 0, "none">;
def TTNN_TensorMemoryLayout_Interleaved : I32EnumAttrCase<"Interleaved", 1, "interleaved">;
def TTNN_TensorMemoryLayout_SingleBank : I32EnumAttrCase<"SingleBank", 2, "single_bank">;
def TTNN_TensorMemoryLayout_HeightSharded : I32EnumAttrCase<"HeightSharded", 3, "height_sharded">;
@@ -30,7 +29,6 @@ def TTNN_TensorMemoryLayout_BlockSharded : I32EnumAttrCase<"BlockSharded", 5, "b

def TTNN_TensorMemoryLayout : I32EnumAttr<"TensorMemoryLayout", "TTNN Tensor Memory Layout",
[
TTNN_TensorMemoryLayout_None,
TTNN_TensorMemoryLayout_Interleaved,
TTNN_TensorMemoryLayout_SingleBank,
TTNN_TensorMemoryLayout_HeightSharded,
5 changes: 3 additions & 2 deletions include/ttmlir/Dialect/TTNN/IR/TTNNWorkarounds.h
@@ -93,8 +93,9 @@ struct WorkaroundResult {
// Target tensor buffer type.
std::pair<BufferType, bool> targetTensorBufferTypeResult;

// Target tensor memory layout.
std::pair<TensorMemoryLayout, bool> targetTensorMemoryLayoutResult;
// Target tensor memory layout. Can be nullopt for tensors on host.
std::pair<std::optional<TensorMemoryLayout>, bool>
targetTensorMemoryLayoutResult;

// Returns true if any of the workarounds were applied.
bool modified() const {
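A sketch of how a consumer of `WorkaroundResult` might read the revised field; the two names shown in the hunk are real, while the surrounding function and the struct's namespace are assumptions.

```cpp
#include <optional>

#include "ttmlir/Dialect/TTNN/IR/TTNNWorkarounds.h"

// Illustrative consumer; WorkaroundResult's namespace is assumed here.
using namespace mlir::tt::ttnn;

void applyMemLayoutWorkaround(const WorkaroundResult &result) {
  // Unpack the (value, was-modified) pair added above.
  const auto &[memLayout, changed] = result.targetTensorMemoryLayoutResult;
  if (!changed) {
    return;
  }
  if (memLayout.has_value()) {
    // Rewrite the tensor encoding to use *memLayout.
  } else {
    // nullopt: the tensor lives on host, so drop the memory layout entirely.
  }
}
```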
2 changes: 0 additions & 2 deletions include/ttmlir/Target/TTNN/utils.h
@@ -26,8 +26,6 @@ ::tt::target::TensorMemoryLayout toTargetTensorMemoryLayout(
return ::tt::target::TensorMemoryLayout::WidthSharded;
case ::mlir::tt::ttnn::TensorMemoryLayout::BlockSharded:
return ::tt::target::TensorMemoryLayout::BlockSharded;
case ::mlir::tt::ttnn::TensorMemoryLayout::None:
return ::tt::target::TensorMemoryLayout::None;
}

llvm_unreachable("Unsupported TensorMemoryLayout");
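With the `None` case gone, the `switch` above is exhaustive over real device layouts, so callers must decide what a host tensor serializes to before converting. One possible wrapper, purely illustrative: it assumes the flatbuffer-side enum keeps its `None` value (which the removed lines suggest) and elides the existing converter's namespace.

```cpp
#include <optional>

// Illustrative: map an absent layout (host tensor) to the flatbuffer None
// value, deferring to the existing converter otherwise.
::tt::target::TensorMemoryLayout toTargetMemLayoutOrNone(
    std::optional<::mlir::tt::ttnn::TensorMemoryLayout> layout) {
  if (!layout.has_value()) {
    return ::tt::target::TensorMemoryLayout::None;
  }
  return toTargetTensorMemoryLayout(*layout);
}
```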
28 changes: 19 additions & 9 deletions lib/CAPI/TTNNAttrs.cpp
@@ -53,10 +53,9 @@ MlirAttribute ttmlirTTNNMemoryConfigAttrGet(
MlirContext ctx, MlirAttribute tensorMemoryLayoutAttr,
MlirAttribute bufferTypeAttr, MlirAttribute shardSpecAttr) {
return wrap(MemoryConfigAttr::get(
unwrap(ctx),
mlir::cast<TensorMemoryLayoutAttr>(unwrap(tensorMemoryLayoutAttr)),
mlir::cast<BufferTypeAttr>(unwrap(bufferTypeAttr)),
mlir::cast<ShardSpecAttr>(unwrap(shardSpecAttr))));
unwrap(ctx), mlir::cast<BufferTypeAttr>(unwrap(bufferTypeAttr)),
mlir::cast<ShardSpecAttr>(unwrap(shardSpecAttr)),
mlir::cast<TensorMemoryLayoutAttr>(unwrap(tensorMemoryLayoutAttr))));
}

MlirAttribute ttmlirTTNNShapeAttrGet(MlirContext ctx, int64_t *shape,
@@ -69,14 +68,25 @@ MlirAttribute ttmlirTTNNMeshShapeAttrGet(MlirContext ctx, int64_t y,
return wrap(MeshShapeAttr::get(unwrap(ctx), y, x));
}

// Get layout TTNNLayout attribute
//
// param ctx: mlir context
// param linear Affine map for mapping tensor from logical to physical space
// param grid Grid of cores where tensor is mapped to
// param memref Memref which holds shard size, shard scalar type and memory
// param memLayout Memory layout of the tensor
MlirAttribute ttmlirTTNNTTNNLayoutAttrGet(MlirContext ctx, MlirAffineMap linear,
MlirAttribute grid, MlirType memref,
unsigned memLayout) {
unsigned *memLayout = nullptr) {
mlir::AffineMap affineMap = mlir::AffineMap::getFromOpaquePointer(linear.ptr);
return wrap(TTNNLayoutAttr::get(unwrap(ctx), affineMap,
mlir::cast<GridAttr>(unwrap(grid)),
mlir::cast<MemRefType>(unwrap(memref)),
static_cast<TensorMemoryLayout>(memLayout)));
TensorMemoryLayoutAttr memLayoutAttr;
if (memLayout) {
memLayoutAttr = TensorMemoryLayoutAttr::get(
unwrap(ctx), static_cast<TensorMemoryLayout>(*memLayout));
}
return wrap(TTNNLayoutAttr::get(
unwrap(ctx), affineMap, mlir::cast<GridAttr>(unwrap(grid)),
mlir::cast<MemRefType>(unwrap(memref)), memLayoutAttr));
}

} // namespace mlir::tt::ttnn
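From the C API side, the memory layout is now passed by pointer so absence is expressible. A hedged call-site sketch: the header path is an assumption, and the four handles would come from earlier CAPI calls.

```cpp
#include "ttmlir-c/TTNNAttrs.h" // assumed header for the CAPI declarations

void buildLayouts(MlirContext ctx, MlirAffineMap linear, MlirAttribute grid,
                  MlirType memref) {
  // Device tensor: pass a pointer to the enum's numeric value
  // (1 == TensorMemoryLayout::Interleaved per the .td above).
  unsigned interleaved = 1;
  MlirAttribute deviceLayout =
      ttmlirTTNNTTNNLayoutAttrGet(ctx, linear, grid, memref, &interleaved);

  // Host tensor: pass nullptr, yielding a layout with a null memory layout.
  MlirAttribute hostLayout =
      ttmlirTTNNTTNNLayoutAttrGet(ctx, linear, grid, memref, nullptr);

  (void)deviceLayout;
  (void)hostLayout;
}
```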
25 changes: 9 additions & 16 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -63,8 +63,8 @@ class TensorEmptyConversionPattern
// If the tensor is not going to device, we can create the op without
// device-specific attributes
//
ttnn::TensorMemoryLayout memLayout = layoutAttr.getMemLayout();
if (memLayout == ttnn::TensorMemoryLayout::None) {
ttnn::TensorMemoryLayoutAttr memLayout = layoutAttr.getMemLayout();
if (!memLayout) {
rewriter.replaceOpWithNewOp<ttnn::EmptyOp>(
op, this->getTypeConverter()->convertType(op.getType()), nullptr,
shapeAttr, dTypeAttr, tensorLayoutAttr, nullptr);
@@ -79,12 +79,10 @@ class TensorEmptyConversionPattern
auto device = ::ttnn::utils::getOrInsertDevice(rewriter, op);
llvm::SmallVector<int64_t> shardShape = layoutAttr.getShardShape();
ttnn::MemoryConfigAttr memoryConfigAttr = ttnn::MemoryConfigAttr::get(
op.getContext(),
ttnn::TensorMemoryLayoutAttr::get(op.getContext(), memLayout),
ttnn::BufferTypeAttr::get(op.getContext(), bufferType),
op.getContext(), ttnn::BufferTypeAttr::get(op.getContext(), bufferType),
ttnn::ShardSpecAttr::get(
op.getContext(),
ttnn::ShapeAttr::get(op.getContext(), shardShape)));
op.getContext(), ttnn::ShapeAttr::get(op.getContext(), shardShape)),
memLayout);

rewriter.replaceOpWithNewOp<ttnn::EmptyOp>(
op, this->getTypeConverter()->convertType(op.getType()), device,
@@ -159,17 +157,13 @@ class ToLayoutOpConversionPattern
llvm::SmallVector<int64_t> outputShardShape =
outputLayoutAttr.getShardShape();

// Determine output memory config attr
ttnn::TensorMemoryLayout outputTensorMemoryLayout =
outputLayoutAttr.getMemLayout();
ttnn::MemoryConfigAttr outputMemConfigAttr = ttnn::MemoryConfigAttr::get(
rewriter.getContext(),
ttnn::TensorMemoryLayoutAttr::get(rewriter.getContext(),
outputTensorMemoryLayout),
ttnn::BufferTypeAttr::get(rewriter.getContext(), outputBufferType),
ttnn::ShardSpecAttr::get(
op.getContext(),
ttnn::ShapeAttr::get(rewriter.getContext(), outputShardShape)));
ttnn::ShapeAttr::get(rewriter.getContext(), outputShardShape)),
outputLayoutAttr.getMemLayout());

rewriter.replaceOpWithNewOp<ttnn::ToLayoutOp>(
op, this->getTypeConverter()->convertType(result), adaptor.getInput(),
@@ -950,11 +944,10 @@ class ArangeOpConversionPattern : public OpConversionPattern<ttir::ArangeOp> {

ttnn::MemoryConfigAttr memConfigAttr =
rewriter.getAttr<ttnn::MemoryConfigAttr>(
rewriter.getAttr<ttnn::TensorMemoryLayoutAttr>(
layoutAttr.getMemLayout()),
rewriter.getAttr<ttnn::BufferTypeAttr>(layoutAttr.getBufferType()),
rewriter.getAttr<ttnn::ShardSpecAttr>(
rewriter.getAttr<ttnn::ShapeAttr>(layoutAttr.getShardShape())));
rewriter.getAttr<ttnn::ShapeAttr>(layoutAttr.getShardShape())),
layoutAttr.getMemLayout());

rewriter.replaceOpWithNewOp<ttnn::ArangeOp>(
op, outputType, adaptor.getStart(), adaptor.getEnd(), adaptor.getStep(),
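All three call sites above follow the same reordering, so the pattern condenses to one shape; a sketch under the same assumptions as before (the helper name is illustrative):

```cpp
#include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.h"

using namespace mlir::tt::ttnn;

// New MemoryConfigAttr parameter order: buffer type, shard spec, then the
// optional memory layout last (null for host tensors).
MemoryConfigAttr makeMemoryConfig(mlir::MLIRContext *ctx,
                                  TTNNLayoutAttr layout) {
  return MemoryConfigAttr::get(
      ctx, BufferTypeAttr::get(ctx, layout.getBufferType()),
      ShardSpecAttr::get(ctx, ShapeAttr::get(ctx, layout.getShardShape())),
      layout.getMemLayout());
}
```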
2 changes: 0 additions & 2 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -86,8 +86,6 @@ emitc::OpaqueAttr convertTensorMemoryLayout(Builder &builder,
case ttnn::TensorMemoryLayout::WidthSharded:
return builder.getType<emitc::OpaqueAttr>(
"ttnn::TensorMemoryLayout::WIDTH_SHARDED");
case ttnn::TensorMemoryLayout::None:
llvm_unreachable("Unsupported ttnn::TensorMemoryLayout");
}
}

4 changes: 3 additions & 1 deletion lib/Dialect/TTNN/Analysis/DFShardingPolicy.cpp
@@ -217,9 +217,11 @@ void DFShardingPolicy::pickOpShardLayouts(ShardSolver &shardSolver,
maxCoreUsage = accMaxCoreUsage[op][layoutIterator.index()];
selectedLayout = layoutIterator.get();
} else if (accMaxCoreUsage[op][layoutIterator.index()] == maxCoreUsage) {
assert(layoutIterator->getMemLayout() &&
"TensorMemoryLayout is not set");
// If we have a tie, prefer layout that is not BlockSharded.
//
if (layoutIterator->getMemLayout() !=
if (layoutIterator->getMemLayout().getValue() !=
ttnn::TensorMemoryLayout::BlockSharded) {
selectedLayout = layoutIterator.get();
}
24 changes: 11 additions & 13 deletions lib/Dialect/TTNN/IR/TTNNOps.cpp
@@ -205,10 +205,11 @@ ::mlir::LogicalResult mlir::tt::ttnn::EmptyOp::verify() {
//
if (getMemoryConfig().has_value()) {
ttnn::BufferType bufferType = layoutAttr.getBufferType();
ttnn::TensorMemoryLayout tensorMemoryLayout = layoutAttr.getMemLayout();
ttnn::TensorMemoryLayoutAttr tensorMemoryLayoutAttr =
layoutAttr.getMemLayout();
assert(bufferType == getMemoryConfig()->getBufferType().getValue());
assert(tensorMemoryLayout ==
getMemoryConfig()->getTensorMemoryLayout().getValue());
assert(tensorMemoryLayoutAttr ==
getMemoryConfig()->getTensorMemoryLayout());
}
//
// ==============================
@@ -547,9 +548,10 @@ ::mlir::LogicalResult mlir::tt::ttnn::EmbeddingOp::verify() {
//===----------------------------------------------------------------------===//

// Utility methods
static bool isValidDeviceLayout(TensorMemoryLayout layout) {
return layout == TensorMemoryLayout::Interleaved ||
isShardedMemoryLayout(layout);
static bool isValidDeviceLayout(TensorMemoryLayoutAttr memLayoutAttr) {
return memLayoutAttr &&
(memLayoutAttr.getValue() == TensorMemoryLayout::Interleaved ||
isShardedMemoryLayout(memLayoutAttr.getValue()));
}

// ToMemoryConfigOp verification
@@ -567,11 +569,7 @@ ::mlir::LogicalResult mlir::tt::ttnn::ToMemoryConfigOp::verify() {
return emitOpError("Output tensor type missing layout attribute");
}
BufferType outputBufferType = outputLayout.getBufferType();
TensorMemoryLayout outputMemoryLayout = outputLayout.getMemLayout();
if (isSystemBufferType(outputBufferType) &&
outputMemoryLayout != TensorMemoryLayout::None) {
return emitOpError("System memory space only supports undef memory layout");
}
TensorMemoryLayoutAttr outputMemoryLayout = outputLayout.getMemLayout();

if (isDeviceBufferType(outputBufferType) &&
!isValidDeviceLayout(outputMemoryLayout)) {
Expand All @@ -580,7 +578,7 @@ ::mlir::LogicalResult mlir::tt::ttnn::ToMemoryConfigOp::verify() {
}

if (outputBufferType == BufferType::DRAM &&
outputMemoryLayout != TensorMemoryLayout::Interleaved) {
outputMemoryLayout.getValue() != TensorMemoryLayout::Interleaved) {
return emitOpError(
"Device DRAM memory space only supports interleaved memory layout");
}
@@ -594,7 +592,7 @@ ::mlir::LogicalResult mlir::tt::ttnn::ToMemoryConfigOp::verify() {
if (shardShape.size() != 2) {
return emitOpError("Shard shape must be 2D");
}
if (outputMemoryLayout == TensorMemoryLayout::BlockSharded) {
if (outputMemoryLayout.getValue() == TensorMemoryLayout::BlockSharded) {
// TTNN tiles are (32, 32), shard shape must evenly divide the tile shape
if (shardShape[0] % TILE_HEIGHT != 0 or shardShape[1] % TILE_WIDTH != 0) {
return emitOpError(