Skip to content

Commit

Permalink
[MLIR][Bufferization] Choose default memory space in tensor copy insertion (#88500)
Browse files Browse the repository at this point in the history

Tensor copy insertion currently uses memory_space = 0 when creating a
tensor copy using alloc_tensor. This memory space should instead be the
default memory space provided in bufferization options.
  • Loading branch information
Groverkss authored Apr 12, 2024
1 parent e0a6287 commit 6f1e23b
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 7 deletions.
7 changes: 4 additions & 3 deletions mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -193,10 +193,11 @@ FailureOr<Value> bufferization::allocateTensorForShapedValue(
FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
if (failed(copyBufferType))
return failure();
Attribute memorySpace = copyBufferType->getMemorySpace();
std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
if (!memorySpace)
memorySpace = b.getI64IntegerAttr(0);
allocTensorOp.setMemorySpaceAttr(memorySpace);
memorySpace = options.defaultMemorySpaceFn(tensorType);
if (memorySpace.has_value())
allocTensorOp.setMemorySpaceAttr(memorySpace.value());
return allocTensorOp.getResult();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
{
// The second alloc_tensor should not have a copy operand.
// CHECK: bufferization.alloc_tensor() : tensor<5xf32>
// CHECK: bufferization.alloc_tensor() {memory_space = 0 : i64} : tensor<5xf32>
// CHECK: bufferization.alloc_tensor() : tensor<5xf32>
%0 = bufferization.alloc_tensor() : tensor<5xf32>
%1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
return %0, %1 : tensor<5xf32>, tensor<5xf32>
Expand All @@ -46,7 +46,7 @@ func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
-> (tensor<5xf32>, tensor<5xf32>)
{
// CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {memory_space = 0 : i64} : tensor<5xf32>
// CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() : tensor<5xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
Expand All @@ -65,7 +65,7 @@ func.func @do_not_copy_when_result_not_read(%t: tensor<5xf32>, %f: f32)
-> (tensor<3xf32>)
{
%0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
// CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {memory_space = 0 : i64} : tensor<3xf32>
// CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() : tensor<3xf32>
// CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
%r = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>],
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {memory_space = 0 : i64} : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
Expand Down

0 comments on commit 6f1e23b

Please sign in to comment.