[X86] combineLoad - try to reuse existing constant pool entries for smaller vector constant data (REAPPLIED)

If we already have a YMM/ZMM constant whose lower bits match a smaller XMM/YMM constant, ensure we reuse the same constant pool entry.

This extends the similar combines we already have for reusing VBROADCAST_LOAD/SUBV_BROADCAST_LOAD constant loads.

This is mainly a canonicalization, but it should make it easier for us to merge constant loads in a future commit (related to both #70947 and better X86FixupVectorConstantsPass usage for #71078).

Reapplied with a fix to ensure we don't 'flip-flop' between multiple matching constants: only perform the fold if the new constant pool entry is larger than the current entry.
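
For illustration, a minimal standalone C++ sketch of the two conditions the fold relies on (the helper names are hypothetical, this is not the actual SelectionDAG code, and it ignores the undef-bit tracking the real combine performs): the smaller constant's bytes must be exactly the low bytes of an existing wider constant pool entry, and that wider entry must be strictly larger than the entry currently in use.

// Standalone sketch of the reuse check (illustrative only; not LLVM API code).
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// True if the smaller constant's bytes are exactly the low bytes of the wider
// constant pool entry (e.g. a 16-byte XMM constant vs a 32-byte YMM entry).
static bool matchesLowerBytes(const std::vector<uint8_t> &Narrow,
                              const std::vector<uint8_t> &Wide) {
  return Narrow.size() < Wide.size() &&
         std::equal(Narrow.begin(), Narrow.end(), Wide.begin());
}

// Only fold when the candidate entry is strictly larger than the entry the
// load currently uses - this is the reapplied fix that stops the combine from
// flip-flopping between two matching entries of the same size.
static bool shouldReuseWiderEntry(std::size_t CurrentEntryBytes,
                                  const std::vector<uint8_t> &Narrow,
                                  const std::vector<uint8_t> &Wide) {
  return matchesLowerBytes(Narrow, Wide) && Wide.size() > CurrentEntryBytes;
}

int main() {
  // XMM constant <2 x i64> <0, 1> as little-endian bytes.
  std::vector<uint8_t> Xmm(16, 0);
  Xmm[8] = 1;
  // YMM constant <4 x i64> <0, 1, 2, 3>: its low 16 bytes equal the XMM data.
  std::vector<uint8_t> Ymm(32, 0);
  Ymm[8] = 1;
  Ymm[16] = 2;
  Ymm[24] = 3;

  std::printf("reuse 32-byte entry for 16-byte load: %d\n",
              shouldReuseWiderEntry(Xmm.size(), Xmm, Ymm)); // prints 1
  std::printf("re-fold onto a same-sized entry: %d\n",
              shouldReuseWiderEntry(Ymm.size(), Ymm, Ymm)); // prints 0
  return 0;
}

In the actual combine this corresponds to comparing the truncated getTargetConstantBitsFromNode results for the two loads and requiring the user's constant type to be wider than the current one.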
RKSimon committed Nov 20, 2023
1 parent a2cf44b commit 59d14b6
Showing 9 changed files with 1,770 additions and 1,779 deletions.
34 changes: 30 additions & 4 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -49875,17 +49875,18 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
}
}

// If we also broadcast this to a wider type, then just extract the lowest
// subvector.
// If we also load/broadcast this to a wider type, then just extract the
// lowest subvector.
if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
(RegVT.is128BitVector() || RegVT.is256BitVector())) {
SDValue Ptr = Ld->getBasePtr();
SDValue Chain = Ld->getChain();
for (SDNode *User : Chain->uses()) {
if (User != N &&
(User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
User->getOpcode() == X86ISD::VBROADCAST_LOAD) &&
cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
ISD::isNormalLoad(User)) &&
cast<MemSDNode>(User)->getChain() == Chain &&
!User->hasAnyUseOfValue(1) &&
User->getValueSizeInBits(0).getFixedValue() >
RegVT.getFixedSizeInBits()) {
@@ -49916,6 +49917,31 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
}
}
}
if (ISD::isNormalLoad(User)) {
// See if we are loading a constant that matches in the lower
// bits of a longer constant (but from a different constant pool ptr).
SDValue UserPtr = cast<LoadSDNode>(User)->getBasePtr();
const Constant *LdC = getTargetConstantFromBasePtr(Ptr);
const Constant *UserC = getTargetConstantFromBasePtr(UserPtr);
if (LdC && UserC && UserPtr != Ptr &&
LdC->getType()->getPrimitiveSizeInBits() <
UserC->getType()->getPrimitiveSizeInBits()) {
APInt Undefs, UserUndefs;
SmallVector<APInt> Bits, UserBits;
if (getTargetConstantBitsFromNode(SDValue(N, 0), 8, Undefs, Bits) &&
getTargetConstantBitsFromNode(SDValue(User, 0), 8, UserUndefs,
UserBits)) {
UserUndefs = UserUndefs.trunc(Undefs.getBitWidth());
UserBits.truncate(Bits.size());
if (Bits == UserBits && UserUndefs.isSubsetOf(Undefs)) {
SDValue Extract = extractSubVector(
SDValue(User, 0), 0, DAG, SDLoc(N), RegVT.getSizeInBits());
Extract = DAG.getBitcast(RegVT, Extract);
return DCI.CombineTo(N, Extract, SDValue(User, 1));
}
}
}
}
}
}
}
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -1400,7 +1400,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-64-LABEL: f4xi64_i128:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1]
; AVX-64-NEXT: vpaddq %xmm2, %xmm1, %xmm1
; AVX-64-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1535,7 +1535,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm3 = [2,3]
; AVX-64-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm4 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
; AVX-64-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1]
; AVX-64-NEXT: vpaddq %xmm4, %xmm1, %xmm1
; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm2
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -2157,7 +2157,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7,8,9,10],ymm11[11],ymm10[12,13,14,15]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm11 = ymm10[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm8, %ymm11, %ymm8
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm11 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm11, %xmm12
@@ -2329,7 +2329,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = <2,5,1,u,4,u,u,u>
; AVX2-FAST-NEXT: vpermd %ymm11, %ymm12, %ymm11
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
; AVX2-FAST-NEXT: vpblendvb %ymm11, %ymm10, %ymm12, %ymm10
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm6[2],ymm4[3,4,5],ymm6[6],ymm4[7]
; AVX2-FAST-NEXT: vextracti128 $1, %ymm12, %xmm13
@@ -2496,7 +2496,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5,6,7,8,9,10],ymm12[11],ymm11[12,13,14,15]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm11, %ymm8, %ymm12, %ymm8
; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm12 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm12, %xmm13
@@ -1685,7 +1685,7 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-ONLY-NEXT: # ymm10 = mem[0,1,0,1]
; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm7, %ymm8, %ymm7
; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
; AVX2-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0>
; AVX2-ONLY-NEXT: vpblendvb %ymm10, %ymm6, %ymm7, %ymm6
; AVX2-ONLY-NEXT: vmovdqa 144(%rdi), %xmm7
; AVX2-ONLY-NEXT: vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[1,6,11]
15 changes: 7 additions & 8 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -1238,13 +1238,12 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm6[4,5,6,7]
; AVX512F-NEXT: vmovdqa (%rdx), %ymm6
; AVX512F-NEXT: vmovdqa 32(%rdx), %ymm7
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = <5,5,u,6,6,u,7,7>
; AVX512F-NEXT: vpermd %ymm7, %ymm9, %ymm9
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
; AVX512F-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512F-NEXT: vpshufb %ymm10, %ymm7, %ymm7
; AVX512F-NEXT: vinserti64x4 $1, %ymm9, %zmm7, %zmm7
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512F-NEXT: vpshufb %ymm9, %ymm7, %ymm10
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = <5,5,u,6,6,u,7,7>
; AVX512F-NEXT: vpermd %ymm7, %ymm11, %ymm7
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
; AVX512F-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
; AVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
; AVX512F-NEXT: vmovdqa (%rdi), %ymm3
; AVX512F-NEXT: vpshufb %ymm5, %ymm3, %ymm3
@@ -1259,7 +1258,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-NEXT: vpshufb %ymm10, %ymm6, %ymm1
; AVX512F-NEXT: vpshufb %ymm9, %ymm6, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = <u,0,0,u,1,1,u,2>
; AVX512F-NEXT: vpermd %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
34 changes: 16 additions & 18 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -2831,15 +2831,15 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,0]
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm9, %zmm2
; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm2
; AVX512F-SLOW-NEXT: vmovdqa (%r8), %ymm9
; AVX512F-SLOW-NEXT: vmovdqa 32(%r8), %ymm10
; AVX512F-SLOW-NEXT: vmovdqa (%r8), %ymm10
; AVX512F-SLOW-NEXT: vmovdqa 32(%r8), %ymm9
; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
; AVX512F-SLOW-NEXT: vpshufb %ymm11, %ymm10, %ymm4
; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
; AVX512F-SLOW-NEXT: vpshufb %ymm11, %ymm9, %ymm4
; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,1,1]
; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
; AVX512F-SLOW-NEXT: vpandnq %ymm10, %ymm21, %ymm10
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm4, %zmm10, %zmm10
; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm10
; AVX512F-SLOW-NEXT: vpandnq %ymm9, %ymm21, %ymm9
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm4, %zmm9, %zmm9
; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm9
; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %xmm2
; AVX512F-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
; AVX512F-SLOW-NEXT: vpshufb %xmm13, %xmm4, %xmm4
@@ -2860,7 +2860,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
; AVX512F-SLOW-NEXT: vpternlogq $226, %zmm2, %zmm7, %zmm4
; AVX512F-SLOW-NEXT: vpbroadcastq (%r8), %ymm2
; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm9[0,1,1,1]
; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm10[0,1,1,1]
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm18[0,1,2,1,4,5,6,5]
@@ -2909,16 +2909,15 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512F-SLOW-NEXT: vpternlogq $226, %zmm0, %zmm16, %zmm1
; AVX512F-SLOW-NEXT: vpbroadcastq 16(%r8), %ymm0
; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
; AVX512F-SLOW-NEXT: vpandn %ymm0, %ymm3, %ymm0
; AVX512F-SLOW-NEXT: vpshufb %ymm11, %ymm9, %ymm3
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
; AVX512F-SLOW-NEXT: vpshufb %ymm11, %ymm10, %ymm0
; AVX512F-SLOW-NEXT: vpbroadcastq 16(%r8), %ymm3
; AVX512F-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, 64(%r9)
; AVX512F-SLOW-NEXT: vmovdqa64 %zmm4, 256(%r9)
; AVX512F-SLOW-NEXT: vmovdqa64 %zmm2, (%r9)
; AVX512F-SLOW-NEXT: vmovdqa64 %zmm10, 192(%r9)
; AVX512F-SLOW-NEXT: vmovdqa64 %zmm9, 192(%r9)
; AVX512F-SLOW-NEXT: vmovdqa64 %zmm19, 128(%r9)
; AVX512F-SLOW-NEXT: vzeroupper
; AVX512F-SLOW-NEXT: retq
@@ -3019,11 +3018,10 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm7, %zmm13, %zmm7
; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
; AVX512F-FAST-NEXT: vpternlogq $226, %zmm3, %zmm20, %zmm7
; AVX512F-FAST-NEXT: vmovdqa64 %ymm24, %ymm3
; AVX512F-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX512F-FAST-NEXT: vpbroadcastq 16(%r8), %ymm3
; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
; AVX512F-FAST-NEXT: vpandn %ymm3, %ymm13, %ymm3
; AVX512F-FAST-NEXT: vmovdqa64 %ymm24, %ymm11
; AVX512F-FAST-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX512F-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; AVX512F-FAST-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
; AVX512F-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]