[flang][cuda] Propagate data attribute to global with initialization #95504
Merged
Conversation
Globals with an initial value were missing the CUDA data attribute.
@llvm/pr-subscribers-flang-fir-hlfir
Author: Valentin Clement (バレンタイン クレメン) (clementval)
Changes: Globals with an initial value were missing the CUDA data attribute.
Full diff: https://github.com/llvm/llvm-project/pull/95504.diff
4 Files Affected:
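For context, a minimal sketch of the case this patch addresses (module and variable names here are made up; the expected FIR line mirrors the patterns added to cuda-data-attribute.cuf below). Before this change, such a global carried the dense initializer but lost the data_attr attribute:

! Hypothetical reproducer, not taken from the patch itself.
module m
  integer, device, dimension(4) :: d_init = (/ 1, 2, 3, 4 /)
end module
! Expected lowering after this patch (data_attr now present alongside the initializer):
! fir.global @_QMmEd_init(dense<[1, 2, 3, 4]> : tensor<4xi32>) {data_attr = #cuf.cuda<device>} : !fir.array<4xi32>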
diff --git a/flang/include/flang/Lower/ConvertConstant.h b/flang/include/flang/Lower/ConvertConstant.h
index c49cbbc6e7426..1bd11e9bacd61 100644
--- a/flang/include/flang/Lower/ConvertConstant.h
+++ b/flang/include/flang/Lower/ConvertConstant.h
@@ -64,7 +64,8 @@ fir::GlobalOp tryCreatingDenseGlobal(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Type symTy,
llvm::StringRef globalName,
mlir::StringAttr linkage, bool isConst,
- const Fortran::lower::SomeExpr &initExpr);
+ const Fortran::lower::SomeExpr &initExpr,
+ cuf::DataAttributeAttr dataAttr = {});
/// Lower a StructureConstructor that must be lowered in read only data although
/// it may not be wrapped into a Constant<T> (this may be the case for derived
diff --git a/flang/lib/Lower/ConvertConstant.cpp b/flang/lib/Lower/ConvertConstant.cpp
index 653e874a969c5..a4ace40a3a1c4 100644
--- a/flang/lib/Lower/ConvertConstant.cpp
+++ b/flang/lib/Lower/ConvertConstant.cpp
@@ -102,7 +102,8 @@ class DenseGlobalBuilder {
mlir::Location loc, mlir::Type symTy,
llvm::StringRef globalName,
mlir::StringAttr linkage, bool isConst,
- const Fortran::lower::SomeExpr &initExpr) {
+ const Fortran::lower::SomeExpr &initExpr,
+ cuf::DataAttributeAttr dataAttr) {
DenseGlobalBuilder globalBuilder;
std::visit(
Fortran::common::visitors{
@@ -119,7 +120,7 @@ class DenseGlobalBuilder {
},
initExpr.u);
return globalBuilder.tryCreatingGlobal(builder, loc, symTy, globalName,
- linkage, isConst);
+ linkage, isConst, dataAttr);
}
template <Fortran::common::TypeCategory TC, int KIND>
@@ -127,11 +128,12 @@ class DenseGlobalBuilder {
fir::FirOpBuilder &builder, mlir::Location loc, mlir::Type symTy,
llvm::StringRef globalName, mlir::StringAttr linkage, bool isConst,
const Fortran::evaluate::Constant<Fortran::evaluate::Type<TC, KIND>>
- &constant) {
+ &constant,
+ cuf::DataAttributeAttr dataAttr) {
DenseGlobalBuilder globalBuilder;
globalBuilder.tryConvertingToAttributes(builder, constant);
return globalBuilder.tryCreatingGlobal(builder, loc, symTy, globalName,
- linkage, isConst);
+ linkage, isConst, dataAttr);
}
private:
@@ -178,8 +180,8 @@ class DenseGlobalBuilder {
fir::GlobalOp tryCreatingGlobal(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Type symTy,
llvm::StringRef globalName,
- mlir::StringAttr linkage,
- bool isConst) const {
+ mlir::StringAttr linkage, bool isConst,
+ cuf::DataAttributeAttr dataAttr) const {
// Not a "trivial" intrinsic constant array, or empty array.
if (!attributeElementType || attributes.empty())
return {};
@@ -191,7 +193,8 @@ class DenseGlobalBuilder {
auto tensorTy =
mlir::RankedTensorType::get(tensorShape, attributeElementType);
auto init = mlir::DenseElementsAttr::get(tensorTy, attributes);
- return builder.createGlobal(loc, symTy, globalName, linkage, init, isConst);
+ return builder.createGlobal(loc, symTy, globalName, linkage, init, isConst,
+ /*isTarget=*/false, dataAttr);
}
llvm::SmallVector<mlir::Attribute> attributes;
@@ -202,9 +205,9 @@ class DenseGlobalBuilder {
fir::GlobalOp Fortran::lower::tryCreatingDenseGlobal(
fir::FirOpBuilder &builder, mlir::Location loc, mlir::Type symTy,
llvm::StringRef globalName, mlir::StringAttr linkage, bool isConst,
- const Fortran::lower::SomeExpr &initExpr) {
+ const Fortran::lower::SomeExpr &initExpr, cuf::DataAttributeAttr dataAttr) {
return DenseGlobalBuilder::tryCreating(builder, loc, symTy, globalName,
- linkage, isConst, initExpr);
+ linkage, isConst, initExpr, dataAttr);
}
//===----------------------------------------------------------------------===//
@@ -661,7 +664,7 @@ genOutlineArrayLit(Fortran::lower::AbstractConverter &converter,
T::category == Fortran::common::TypeCategory::Complex) {
global = DenseGlobalBuilder::tryCreating(
builder, loc, arrayTy, globalName, builder.createInternalLinkage(),
- true, constant);
+ true, constant, {});
}
if (!global)
// If the number of elements of the array is huge, the compilation may
diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp
index 80a80fd1d92ef..8c96123b14976 100644
--- a/flang/lib/Lower/ConvertVariable.cpp
+++ b/flang/lib/Lower/ConvertVariable.cpp
@@ -165,12 +165,15 @@ static fir::GlobalOp declareGlobal(Fortran::lower::AbstractConverter &converter,
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
if (fir::GlobalOp global = builder.getNamedGlobal(globalName))
return global;
+ const Fortran::semantics::Symbol &sym = var.getSymbol();
+ cuf::DataAttributeAttr dataAttr =
+ Fortran::lower::translateSymbolCUFDataAttribute(
+ converter.getFirOpBuilder().getContext(), sym);
// Always define linkonce data since it may be optimized out from the module
// that actually owns the variable if it does not refers to it.
if (linkage == builder.createLinkOnceODRLinkage() ||
linkage == builder.createLinkOnceLinkage())
- return defineGlobal(converter, var, globalName, linkage);
- const Fortran::semantics::Symbol &sym = var.getSymbol();
+ return defineGlobal(converter, var, globalName, linkage, dataAttr);
mlir::Location loc = genLocation(converter, sym);
// Resolve potential host and module association before checking that this
// symbol is an object of a function pointer.
@@ -179,9 +182,6 @@ static fir::GlobalOp declareGlobal(Fortran::lower::AbstractConverter &converter,
!Fortran::semantics::IsProcedurePointer(ultimate))
mlir::emitError(loc, "processing global declaration: symbol '")
<< toStringRef(sym.name()) << "' has unexpected details\n";
- cuf::DataAttributeAttr dataAttr =
- Fortran::lower::translateSymbolCUFDataAttribute(
- converter.getFirOpBuilder().getContext(), sym);
return builder.createGlobal(loc, converter.genType(var), globalName, linkage,
mlir::Attribute{}, isConstant(ultimate),
var.isTarget(), dataAttr);
@@ -510,7 +510,7 @@ static fir::GlobalOp defineGlobal(Fortran::lower::AbstractConverter &converter,
if (details->init()) {
global = Fortran::lower::tryCreatingDenseGlobal(
builder, loc, symTy, globalName, linkage, isConst,
- details->init().value());
+ details->init().value(), dataAttr);
if (global) {
global.setVisibility(mlir::SymbolTable::Visibility::Public);
return global;
diff --git a/flang/test/Lower/CUDA/cuda-data-attribute.cuf b/flang/test/Lower/CUDA/cuda-data-attribute.cuf
index f7f58a43a1439..192ef044913b9 100644
--- a/flang/test/Lower/CUDA/cuda-data-attribute.cuf
+++ b/flang/test/Lower/CUDA/cuda-data-attribute.cuf
@@ -4,15 +4,30 @@
! Test lowering of CUDA attribute on variables.
module cuda_var
+
+ type :: t1
+ integer :: a
+ end type
+
real, constant :: mod_a_rc
! CHECK: fir.global @_QMcuda_varEmod_a_rc {data_attr = #cuf.cuda<constant>} : f32
real, device :: mod_b_ra
! CHECK: fir.global @_QMcuda_varEmod_b_ra {data_attr = #cuf.cuda<device>} : f32
real, allocatable, managed :: mod_c_rm
! CHECK: fir.global @_QMcuda_varEmod_c_rm {data_attr = #cuf.cuda<managed>} : !fir.box<!fir.heap<f32>>
+
+ integer, device, dimension(10) :: mod_d_i_init = (/ (i, i = 1, 10) /)
+! CHECK: fir.global @_QMcuda_varEmod_d_i_init(dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : tensor<10xi32>) {data_attr = #cuf.cuda<device>} : !fir.array<10xi32>
+
+ real, device, dimension(10) :: mod_d_rinit = (/ (i, i = 1, 10) /)
+! CHECK: fir.global @_QMcuda_varEmod_d_rinit(dense<[{{.*}}]> : tensor<10xf32>) {data_attr = #cuf.cuda<device>} : !fir.array<10xf32>
+
real, allocatable, pinned :: mod_d_rp
! CHECK: fir.global @_QMcuda_varEmod_d_rp {data_attr = #cuf.cuda<pinned>} : !fir.box<!fir.heap<f32>>
+ type(t1), device :: mod_d_t(2)
+! CHECK: fir.global @_QMcuda_varEmod_d_t {data_attr = #cuf.cuda<device>} : !fir.array<2x!fir.type<_QMcuda_varTt1{a:i32}>>
+
contains
subroutine local_var_attrs
@@ -71,7 +86,7 @@ end
! CHECK-LABEL: func.func @_QMcuda_varPcuda_alloc_free
! CHECK: %[[ALLOC_A:.*]] = cuf.alloc !fir.array<10xf32> {bindc_name = "a", data_attr = #cuf.cuda<device>, uniq_name = "_QMcuda_varFcuda_alloc_freeEa"} -> !fir.ref<!fir.array<10xf32>>
-! CHECK: %[[SHAPE:.*]] = fir.shape %c10 : (index) -> !fir.shape<1>
+! CHECK: %[[SHAPE:.*]] = fir.shape %c10{{.*}} : (index) -> !fir.shape<1>
! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ALLOC_A]](%[[SHAPE]]) {data_attr = #cuf.cuda<device>, uniq_name = "_QMcuda_varFcuda_alloc_freeEa"} : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)
! CHECK: %[[ALLOC_U:.*]] = cuf.alloc i32 {bindc_name = "u", data_attr = #cuf.cuda<unified>, uniq_name = "_QMcuda_varFcuda_alloc_freeEu"} -> !fir.ref<i32>
Renaud-K approved these changes on Jun 14, 2024
Thank you. Looks good to me.