diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index 7dd36e9b04e52..2ad69f2b4abff 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -2377,13 +2377,6 @@ void Compiler::optAssertionGen(GenTree* tree)
             break;
 
         case GT_IND:
-            // Dynamic block copy sources could be zero-sized and so should not generate assertions.
-            if (tree->TypeIs(TYP_STRUCT))
-            {
-                break;
-            }
-            FALLTHROUGH;
-
         case GT_XAND:
         case GT_XORR:
         case GT_XADD:
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 28538fc2b9053..1e95a74eb8b79 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -8673,16 +8673,29 @@ void Lowering::LowerStoreIndirCoalescing(GenTreeIndir* ind)
     assert(prevData.IsStore());
     assert(currData.IsStore());
 
-    // For now, only constants are supported for data.
-    if (!prevData.value->OperIsConst() || !currData.value->OperIsConst())
+    // Otherwise, the difference between two offsets has to match the size of the type.
+    // We don't support overlapping stores.
+    if (abs(prevData.offset - currData.offset) != (int)genTypeSize(prevData.targetType))
     {
         return;
     }
 
-    // Otherwise, the difference between two offsets has to match the size of the type.
-    // We don't support overlapping stores.
-    if (abs(prevData.offset - currData.offset) != (int)genTypeSize(prevData.targetType))
+    // At this point we know that the 2nd (current) STOREIND has the same side-effects as the previous STOREIND
+    // We can move the address part before the previous STOREIND to make STP friendly
+    auto makeStpFriendly = [&]() {
+#ifdef TARGET_ARM64
+        if (!GenTree::Compare(prevData.value, currData.value) || currData.value->IsVectorZero())
+        {
+            LIR::Range range = BlockRange().Remove(currData.rangeStart, ind->gtPrev);
+            BlockRange().InsertBefore(prevInd, std::move(range));
+        }
+#endif
+    };
+
+    // For now, only constants are supported for data.
+    if (!prevData.value->OperIsConst() || !currData.value->OperIsConst())
     {
+        makeStpFriendly();
         return;
     }
 
@@ -8715,6 +8728,7 @@ void Lowering::LowerStoreIndirCoalescing(GenTreeIndir* ind)
     // Base address being TYP_REF gives us a hint that data is pointer-aligned.
     if (!currData.baseAddr->TypeIs(TYP_REF))
     {
+        makeStpFriendly();
         return;
     }
 
@@ -8807,9 +8821,15 @@ void Lowering::LowerStoreIndirCoalescing(GenTreeIndir* ind)
             break;
         }
         return;
-#endif // TARGET_AMD64
-#endif // FEATURE_HW_INTRINSICS
-#endif // TARGET_64BIT
+#elif defined(TARGET_ARM64) // TARGET_AMD64
+        case TYP_SIMD16:
+            // There is no TYP_SIMD32 to coalesce these two stores.
+            // At least, we can make it STP-friendly:
+            makeStpFriendly();
+            return;
+#endif // TARGET_ARM64
+#endif // FEATURE_HW_INTRINSICS
+#endif // TARGET_64BIT
 
         // TYP_FLOAT and TYP_DOUBLE aren't needed here - they're expected to
         // be converted to TYP_INT/TYP_LONG for constant value.