Merged master:8f2f613a6ec into amd-gfx:af3866b06c5
Local branch amd-gfx af3866b Merged master:59521256915 into amd-gfx:5ccf858b005
Remote branch master 8f2f613 [X86][AVX] combineX86ShufflesRecursively - peekThroughOneUseBitcasts subvector before widening.
Sw authored and Sw committed May 31, 2020
2 parents af3866b + 8f2f613 commit 216ed1d
Showing 3 changed files with 46 additions and 86 deletions.
3 changes: 2 additions & 1 deletion llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -35047,7 +35047,8 @@ static SDValue combineX86ShufflesRecursively(

   for (SDValue &Op : OpInputs)
     if (Op.getValueSizeInBits() < RootSizeInBits)
-      Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op), RootSizeInBits);
+      Op = widenSubVector(peekThroughOneUseBitcasts(Op), false, Subtarget, DAG,
+                          SDLoc(Op), RootSizeInBits);
 
   SmallVector<int, 64> Mask;
   SmallVector<SDValue, 16> Ops;
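The functional change is the single statement above: before each shuffle operand is widened to the root vector size, the combiner now looks through single-use bitcasts, so the underlying subvector rather than its bitcast wrapper is what gets widened. A minimal sketch of the helper's behavior, modeled on llvm::peekThroughOneUseBitcasts (the Sketch-suffixed name is hypothetical):

// Sketch of the peek-through step: strip bitcasts, but only while each
// bitcast has a single user, then return the underlying value.
#include "llvm/CodeGen/SelectionDAGNodes.h"

static llvm::SDValue peekThroughOneUseBitcastsSketch(llvm::SDValue V) {
  while (V.getOpcode() == llvm::ISD::BITCAST && V.getNode()->hasOneUse())
    V = V.getOperand(0);
  return V;
}

Seeing the raw subvector lets combineX86ShufflesRecursively fold the widened operand into further shuffle patterns, which is what removes the vpackuswb/vpmovzxbw round-trips in the vector-reduce-mul.ll diffs below.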
109 changes: 34 additions & 75 deletions llvm/test/CodeGen/X86/vector-reduce-mul.ll
@@ -1970,18 +1970,13 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; AVX2-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
@@ -2058,18 +2053,13 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; AVX512DQ-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512DQ-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
 ; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512DQ-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX512DQ-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512DQ-NEXT: vmovd %xmm0, %eax
@@ -2090,18 +2080,13 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512DQVL-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
 ; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512DQVL-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512DQVL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX512DQVL-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512DQVL-NEXT: vmovd %xmm0, %eax
@@ -2254,18 +2239,13 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; AVX2-NEXT: vpmullw %xmm0, %xmm3, %xmm0
 ; AVX2-NEXT: vpmullw %xmm0, %xmm2, %xmm0
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,2,3,6,7,6,7]
 ; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
@@ -2295,18 +2275,13 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; AVX512BW-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BW-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vmovd %xmm0, %eax
@@ -2336,13 +2311,10 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BWVL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
 ; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm1
@@ -2637,18 +2609,13 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; AVX2-NEXT: vpmullw %xmm1, %xmm3, %xmm1
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vmovd %xmm0, %eax
@@ -2681,18 +2648,13 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; AVX512BW-NEXT: vpmullw %xmm0, %xmm1, %xmm0
 ; AVX512BW-NEXT: vpmullw %xmm0, %xmm2, %xmm0
 ; AVX512BW-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BW-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpand %xmm3, %xmm0, %xmm1
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BW-NEXT: vmovd %xmm0, %eax
@@ -2725,13 +2687,10 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BWVL-NEXT: vpmullw %xmm0, %xmm1, %xmm0
 ; AVX512BWVL-NEXT: vpmullw %xmm0, %xmm2, %xmm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BWVL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[12],zero,xmm0[14],zero,zero,zero,zero,zero
 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
 ; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm1
20 changes: 10 additions & 10 deletions mlir/lib/Transforms/DialectConversion.cpp
@@ -877,8 +877,8 @@ void ConversionPatternRewriterImpl::replaceOp(Operation *op,
   // Record the requested operation replacement.
   replacements.emplace_back(op, newValues);
 
-  /// Mark this operation as recursively ignored so that we don't need to
-  /// convert any nested operations.
+  // Mark this operation as recursively ignored so that we don't need to
+  // convert any nested operations.
   markNestedOpsIgnored(op);
 }

@@ -1647,13 +1647,13 @@ LogicalResult OperationConverter::convert(ConversionPatternRewriter &rewriter,
   // Legalize the given operation.
   if (failed(opLegalizer.legalize(op, rewriter))) {
     // Handle the case of a failed conversion for each of the different modes.
-    /// Full conversions expect all operations to be converted.
+    // Full conversions expect all operations to be converted.
     if (mode == OpConversionMode::Full)
       return op->emitError()
              << "failed to legalize operation '" << op->getName() << "'";
-    /// Partial conversions allow conversions to fail iff the operation was not
-    /// explicitly marked as illegal. If the user provided a nonlegalizableOps
-    /// set, non-legalizable ops are included.
+    // Partial conversions allow conversions to fail iff the operation was not
+    // explicitly marked as illegal. If the user provided a nonlegalizableOps
+    // set, non-legalizable ops are included.
     if (mode == OpConversionMode::Partial) {
       if (opLegalizer.isIllegal(op))
         return op->emitError()
@@ -1663,9 +1663,9 @@ LogicalResult OperationConverter::convert(ConversionPatternRewriter &rewriter,
         trackedOps->insert(op);
     }
   } else {
-    /// Analysis conversions don't fail if any operations fail to legalize,
-    /// they are only interested in the operations that were successfully
-    /// legalized.
+    // Analysis conversions don't fail if any operations fail to legalize,
+    // they are only interested in the operations that were successfully
+    // legalized.
     if (mode == OpConversionMode::Analysis)
       trackedOps->insert(op);

@@ -1684,7 +1684,7 @@ OperationConverter::convertOperations(ArrayRef<Operation *> ops,
     return success();
   ConversionTarget &target = opLegalizer.getTarget();
 
-  /// Compute the set of operations and blocks to convert.
+  // Compute the set of operations and blocks to convert.
   std::vector<Operation *> toConvert;
   for (auto *op : ops) {
     toConvert.emplace_back(op);
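The changes in this file only normalize comment style (/// becomes // for non-documentation comments), but they sit in the code that dispatches on the three conversion modes. For orientation, a sketch of how each mode is reached through the public entry points; the signatures follow the MLIR API of roughly this era and have changed in later releases, so treat every name and parameter here as illustrative:

// Illustrative drivers for the three modes described in the comments above;
// signatures are assumptions based on the mid-2020 MLIR API.
#include "llvm/ADT/DenseSet.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Full conversion: every operation must legalize or the conversion fails.
LogicalResult lowerAll(Operation *op, ConversionTarget &target,
                       const OwningRewritePatternList &patterns) {
  return applyFullConversion(op, target, patterns);
}

// Partial conversion: ops not explicitly marked illegal may stay unconverted.
LogicalResult lowerSome(Operation *op, ConversionTarget &target,
                        const OwningRewritePatternList &patterns) {
  return applyPartialConversion(op, target, patterns);
}

// Analysis conversion: a dry run; convertedOps collects the operations that
// would legalize, without committing any rewrites.
LogicalResult analyze(Operation *op, ConversionTarget &target,
                      const OwningRewritePatternList &patterns) {
  llvm::DenseSet<Operation *> convertedOps;
  return applyAnalysisConversion(op, target, patterns, convertedOps);
}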
