[RISCV] RISCV vector calling convention (2/2) #79096
Conversation
@llvm/pr-subscribers-backend-risc-v @llvm/pr-subscribers-llvm-globalisel

Author: Brandon Wu (4vtomat)

Changes

Patch is 135.00 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/79096.diff

519 Files Affected:
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index acf6cbad1c74809..fbaa2aaa2064267 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -3206,6 +3206,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
}
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ llvm::TypeSize StructSize;
+ llvm::TypeSize PtrElementSize;
+ if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
+ StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ PtrElementSize =
+ CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(Ty));
+ if (STy->containsHomogeneousScalableVectorTypes()) {
+ assert(StructSize == PtrElementSize &&
+ "Only allow non-fractional movement of structure with"
+ "homogeneous scalable vector type");
+
+ ArgVals.push_back(ParamValue::forDirect(AI));
+ break;
+ }
+ }
+
Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
Arg->getName());
@@ -3214,53 +3233,29 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
- llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
- llvm::TypeSize PtrElementSize =
- CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
- if (StructSize.isScalable()) {
- assert(STy->containsHomogeneousScalableVectorTypes() &&
- "ABI only supports structure with homogeneous scalable vector "
- "type");
- assert(StructSize == PtrElementSize &&
- "Only allow non-fractional movement of structure with"
- "homogeneous scalable vector type");
- assert(STy->getNumElements() == NumIRArgs);
-
- llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto *AI = Fn->getArg(FirstIRArg + i);
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- LoadedStructValue =
- Builder.CreateInsertValue(LoadedStructValue, AI, i);
- }
+ uint64_t SrcSize = StructSize.getFixedValue();
+ uint64_t DstSize = PtrElementSize.getFixedValue();
- Builder.CreateStore(LoadedStructValue, Ptr);
+ Address AddrToStoreInto = Address::invalid();
+ if (SrcSize <= DstSize) {
+ AddrToStoreInto = Ptr.withElementType(STy);
} else {
- uint64_t SrcSize = StructSize.getFixedValue();
- uint64_t DstSize = PtrElementSize.getFixedValue();
-
- Address AddrToStoreInto = Address::invalid();
- if (SrcSize <= DstSize) {
- AddrToStoreInto = Ptr.withElementType(STy);
- } else {
- AddrToStoreInto =
- CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
- }
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = Fn->getArg(FirstIRArg + i);
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = Fn->getArg(FirstIRArg + i);
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
+ Builder.CreateStore(AI, EltPtr);
+ }
- if (SrcSize > DstSize) {
- Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
- }
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
@@ -5277,6 +5272,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
break;
}
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
+ llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
+ llvm::TypeSize SrcTypeSize;
+ llvm::TypeSize DstTypeSize;
+ if (STy && ArgInfo.isDirect() && !ArgInfo.getCanBeFlattened()) {
+ SrcTypeSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ if (STy->containsHomogeneousScalableVectorTypes()) {
+ assert(SrcTypeSize == DstTypeSize &&
+ "Only allow non-fractional movement of structure with "
+ "homogeneous scalable vector type");
+
+ IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
+ break;
+ }
+ }
+
// FIXME: Avoid the conversion through memory if possible.
Address Src = Address::invalid();
if (!I->isAggregate()) {
@@ -5292,54 +5305,30 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
- llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
- llvm::Type *SrcTy = Src.getElementType();
- llvm::TypeSize SrcTypeSize =
- CGM.getDataLayout().getTypeAllocSize(SrcTy);
- llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
- if (SrcTypeSize.isScalable()) {
- assert(STy->containsHomogeneousScalableVectorTypes() &&
- "ABI only supports structure with homogeneous scalable vector "
- "type");
- assert(SrcTypeSize == DstTypeSize &&
- "Only allow non-fractional movement of structure with "
- "homogeneous scalable vector type");
- assert(NumIRArgs == STy->getNumElements());
-
- llvm::Value *StoredStructValue =
- Builder.CreateLoad(Src, Src.getName() + ".tuple");
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- llvm::Value *Extract = Builder.CreateExtractValue(
- StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
- IRCallArgs[FirstIRArg + i] = Extract;
- }
+ uint64_t SrcSize = SrcTypeSize.getFixedValue();
+ uint64_t DstSize = DstTypeSize.getFixedValue();
+
+ // If the source type is smaller than the destination type of the
+ // coerce-to logic, copy the source value into a temp alloca the size
+ // of the destination type to allow loading all of it. The bits past
+ // the source value are left undef.
+ if (SrcSize < DstSize) {
+ Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
+ Src.getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ Src = TempAlloca;
} else {
- uint64_t SrcSize = SrcTypeSize.getFixedValue();
- uint64_t DstSize = DstTypeSize.getFixedValue();
-
- // If the source type is smaller than the destination type of the
- // coerce-to logic, copy the source value into a temp alloca the size
- // of the destination type to allow loading all of it. The bits past
- // the source value are left undef.
- if (SrcSize < DstSize) {
- Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
- Src.getName() + ".coerce");
- Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
- Src = TempAlloca;
- } else {
- Src = Src.withElementType(STy);
- }
+ Src = Src.withElementType(STy);
+ }
- assert(NumIRArgs == STy->getNumElements());
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Address EltPtr = Builder.CreateStructGEP(Src, i);
- llvm::Value *LI = Builder.CreateLoad(EltPtr);
- if (ArgHasMaybeUndefAttr)
- LI = Builder.CreateFreeze(LI);
- IRCallArgs[FirstIRArg + i] = LI;
- }
+ assert(NumIRArgs == STy->getNumElements());
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Address EltPtr = Builder.CreateStructGEP(Src, i);
+ llvm::Value *LI = Builder.CreateLoad(EltPtr);
+ if (ArgHasMaybeUndefAttr)
+ LI = Builder.CreateFreeze(LI);
+ IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index 0851d1993d0c0f5..245b111cef83bec 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -433,7 +433,13 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
- return ABIArgInfo::getDirect();
+ ABIArgInfo Info = ABIArgInfo::getDirect();
+
+ // If it is a tuple type, it can't be flattened.
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)))
+ Info.setCanBeFlattened(!STy->containsHomogeneousScalableVectorTypes());
+
+ return Info;
}
if (const VectorType *VT = Ty->getAs<VectorType>())
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c
index 1f790fe38065ae5..a324cb72b67ccf6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c
@@ -668,3291 +668,2260 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x2_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP2]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x2_f16mf4(vfloat16mf4x2_t src, size_t index) {
return __riscv_vget_v_f16mf4x2_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x3_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP3]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x3_f16mf4(vfloat16mf4x3_t src, size_t index) {
return __riscv_vget_v_f16mf4x3_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x4_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[SRC_COERCE3]], 3
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP4]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x4_f16mf4(vfloat16mf4x4_t src, size_t index) {
return __riscv_vget_v_f16mf4x4_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x5_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], <vscale x 1 x half> [[SRC_COERCE4:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[SRC_COERCE3]], 3
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[SRC_COERCE4]], 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x5_f16mf4(vfloat16mf4x5_t src, size_t index) {
return __riscv_vget_v_f16mf4x5_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x6_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], <vscale x 1 x half> [[SRC_COERCE4:%.*]], <vscale x 1 x half> [[SRC_COERCE5:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[SRC_COERCE3]], 3
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[SRC_COERCE4]], 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[SRC_COERCE5]], 5
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x6_f16mf4(vfloat16mf4x6_t src, size_t index) {
return __riscv_vget_v_f16mf4x6_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x7_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], <vscale x 1 x half> [[SRC_COERCE4:%.*]], <vscale x 1 x half> [[SRC_COERCE5:%.*]], <vscale x 1 x half> [[SRC_COERCE6:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64...
[truncated]
Force-pushed from 9c11ca1 to d23ecc7.
Force-pushed from d23ecc7 to fd763f8.
std::vector<Type *> TypeList;
if (IsRet)
  TypeList.push_back(MF.getFunction().getReturnType());
else if (CLI)
Braces
As the LLVM Coding Standards mention, the else if and the for each contain only a single statement and no comments, so we don't need braces for them, do we?
@@ -20543,6 +20531,121 @@ unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
  return Subtarget.getMinimumJumpTableEntries();
}
void RVVArgDispatcher::constructArgInfos(Type *Ty) { |
This seems to be reinventing splitToValueTypes
It's a bit different: splitToValueTypes uses ComputeValueVTs to get the value type(s), and that can't handle vector tuple types. The situation is the same in SelectionDAGISel::LowerArguments; both use ComputeValueVTs to get value types, which is why RVVArgDispatcher comes to help.
What is a "vector tuple type"? The DAG doesn't seem to rely on a similar helper
By vector tuple type, I mean an RVV tuple type, for example vint32m2x2_t, which needs two LMUL=2 vector registers. LLVM models it as struct {vint32m2_t, vint32m2_t}, and the backend would flatten it, which makes it impossible to differentiate a normal vector type from a vector tuple type in AssignFn; that's why I added the helper to both GlobalISel and the normal SelectionDAG path.
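To make that concrete, here is a minimal C sketch of such a tuple type, modeled on the vget tests in this patch (the intrinsic name follows the same naming scheme as those tests and is assumed to be available in a clang recent enough to provide the RVV tuple intrinsics):

#include <riscv_vector.h>

// vint32m2x2_t is a tuple of two LMUL=2 vectors. At the IR level clang
// models it as the literal struct { <vscale x 4 x i32>, <vscale x 4 x i32> },
// which this patch keeps intact instead of flattening.
vint32m2_t take_first(vint32m2x2_t tuple) {
  // Extract tuple element 0, analogous to the vget tests above.
  return __riscv_vget_v_i32m2x2_i32m2(tuple, 0);
}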
Force-pushed from 900c4af to db17c2b.
Rebase
LGTM
This commit handles vector arguments/returns for function definitions/calls. The new class RVVArgDispatcher is added to do all vector register assignment, covering mask types and data types as well as tuple types. It precomputes the register number for each argument per https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#standard-vector-calling-convention-variant, and that information is passed to the calling-convention function to handle all vector arguments. Depends on: llvm#78550
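As a rough sketch of the assignment the dispatcher precomputes, consider the C function below. The register numbers in the comments are my reading of the psABI's standard vector calling-convention variant, not output of this patch, and the intrinsic calls are assumed available:

#include <riscv_vector.h>

// Per the psABI, vector data arguments are assigned to v8-v23 and the first
// vector mask argument is passed in v0. A plausible assignment here:
//   mask -> v0
//   a    -> v8-v9   (an LMUL=2 value occupies an aligned register pair)
//   pair -> v10-v13 (a tuple of two LMUL=2 values gets consecutive pairs)
// The LMUL=2 result comes back in v8-v9.
vint32m2_t masked_add_first(vbool16_t mask, vint32m2_t a, vint32m2x2_t pair) {
  vint32m2_t first = __riscv_vget_v_i32m2x2_i32m2(pair, 0);
  return __riscv_vadd_vv_i32m2_m(mask, a, first, __riscv_vsetvlmax_e32m2());
}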
Force-pushed from a412858 to 1f2cde3.
Rebase and squash.
Hi, this commit has been identified as the root cause of a compiler crash with the following reproducer:
#87897 refers to the same root cause. Could we revert this back to green as a remediation?
The issue is handled in this PR: #87736, thanks!
@4vtomat, the LLVM developer policy mandates reverting instead of fixing forward to keep the tree healthy. Please revert first to unblock the other teams hindered by this issue, investigate offline, and then reapply the fixed patch.
Got it, thanks~
This reverts commit 29e8bfc. The patch didn't handle vector return types correctly.
Return values are handled in the same way as function arguments. One thing to mention: if a type can be broken down into homogeneous vector types, e.g. {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}}, it is considered a vector tuple type and needs to be handled by the tuple type rule.
Bug fix: Handle the RVV return type in the calling convention correctly. Return values are handled in the same way as function arguments. One thing to mention: if a type can be broken down into homogeneous vector types, e.g. {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}}, it is considered a vector tuple type and needs to be handled by the tuple type rule.
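A short C sketch of the return-value path this fix covers (hedged: the segment-load intrinsic name follows the current RVV tuple-intrinsic naming and is assumed available):

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Returning a tuple type exercises the same tuple rule on the return path:
// the { <vscale x 4 x i32>, <vscale x 4 x i32> } aggregate should be assigned
// consecutive vector register groups rather than being flattened.
vint32m2x2_t load_pair(const int32_t *p, size_t vl) {
  return __riscv_vlseg2e32_v_i32m2x2(p, vl);
}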
…79096) (llvm#87736)" This reverts commit 91dd844.
This patch contains two commits; the first one is the same as #78550 and will be removed later.