[X86] Peek through bitcast to find more opportunity for VPERMV3 -> VPERMV combine

A follow-up of #96414.
phoebewang committed Jun 30, 2024
1 parent d9f1166 commit eed7c5e
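
For context, the combine this builds on rewrites an X86ISD::VPERMV3 node whose two source operands are the low and high halves of the same wider vector into a single X86ISD::VPERMV on that wider vector; this patch lets the match look through bitcasts on those operands. A rough sketch of the pattern in DAG-style pseudo-notation (names such as %wide and %mask are illustrative, not taken from the patch):

    Matched (after peeking through bitcasts):
      lo    = extract_subvector %wide, 0
      hi    = extract_subvector %wide, NumElts(lo)
      res   = VPERMV3 (bitcast lo), %mask, (bitcast hi)
    Rewritten to:
      wmask = insert_subvector undef, %mask, 0
      res   = extract_subvector (VPERMV wmask, (bitcast %wide)), 0

On AVX512 targets this turns a vextracti64x4 + vpermi2w/vpermt2b sequence into a single full-width vpermw/vpermb, as the updated tests show.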
Showing 4 changed files with 19 additions and 22 deletions.
10 changes: 6 additions & 4 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41336,8 +41336,9 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
   case X86ISD::VPERMV3: {
     // VPERM[I,T]2[B,W] are 3 uops on Skylake and Icelake so we try to use
     // VPERMV.
-    SDValue V1 = N.getOperand(0);
-    SDValue V2 = N.getOperand(2);
+    SDValue V1 = peekThroughBitcasts(N.getOperand(0));
+    SDValue V2 = peekThroughBitcasts(N.getOperand(2));
+    MVT SVT = V1.getSimpleValueType();
     MVT EVT = VT.getVectorElementType();
     MVT NVT = VT.getDoubleNumVectorElementsVT();
     if ((EVT == MVT::i8 || EVT == MVT::i16) &&
@@ -41346,14 +41347,15 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
         V1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
         V1.getConstantOperandVal(1) == 0 &&
         V2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        V2.getConstantOperandVal(1) == VT.getVectorNumElements() &&
+        V2.getConstantOperandVal(1) == SVT.getVectorNumElements() &&
         V1.getOperand(0) == V2.getOperand(0)) {
       SDValue Mask =
           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NVT, DAG.getUNDEF(NVT),
                       N.getOperand(1), DAG.getIntPtrConstant(0, DL));
       return DAG.getNode(
           ISD::EXTRACT_SUBVECTOR, DL, VT,
-          DAG.getNode(X86ISD::VPERMV, DL, NVT, Mask, V1.getOperand(0)),
+          DAG.getNode(X86ISD::VPERMV, DL, NVT, Mask,
+                      DAG.getBitcast(NVT, V1.getOperand(0))),
           DAG.getIntPtrConstant(0, DL));
     }

21 changes: 9 additions & 12 deletions llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -1638,10 +1638,9 @@ define void @vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8(ptr %in.
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
-; AVX512BW-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
@@ -3539,11 +3538,10 @@ define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %i
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm2 = [0,25,0,27,0,29,0,31]
-; AVX512BW-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm1 = [0,25,0,27,0,29,0,31]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT: vzeroupper
@@ -3672,11 +3670,10 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm2 = [0,25,26,0,28,29,0,31]
-; AVX512BW-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm1 = [0,25,26,0,28,29,0,31]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpbroadcastw %xmm0, %xmm0
-; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT: vzeroupper
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
@@ -856,8 +856,7 @@ define <16 x i8> @oddelts_v32i16_shuffle_v16i16_to_v16i8(<32 x i16> %n2) nounwin
 ; AVX512VBMI-LABEL: oddelts_v32i16_shuffle_v16i16_to_v16i8:
 ; AVX512VBMI: # %bb.0:
 ; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm1 = [2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62]
-; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512VBMI-NEXT: vpermt2b %ymm2, %ymm1, %ymm0
+; AVX512VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0
 ; AVX512VBMI-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512VBMI-NEXT: vzeroupper
 ; AVX512VBMI-NEXT: retq
7 changes: 3 additions & 4 deletions (fourth changed file; path not shown)
@@ -1638,10 +1638,9 @@ define void @vec256_i16_widen_to_i32_factor2_broadcast_to_v8i32_factor8(ptr %in.
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm2 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
-; AVX512BW-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
