[X86] Emit verbose (constant) comments before EVEX compression tag (#78585)

This helps ensure the encoding details appear next to the EVEX tag.

Noticed while preparing to add more constant commenting as part of #73783 and #71078.
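
For illustration, a rough sketch of the resulting comment order on a constant-pool load (hypothetical label and placeholder encoding bytes, mirroring the regenerated checks below):

Before: vmovdqa .LCPI0_0, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
        # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
After:  vmovdqa .LCPI0_0, %xmm0 # xmm0 = [4,4,4,4,4,4,4,65535]
        # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]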
RKSimon authored Jan 18, 2024
1 parent bd2430b commit 33287e3
Showing 7 changed files with 120 additions and 120 deletions.
8 changes: 4 additions & 4 deletions llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -2041,6 +2041,10 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
    }
  }

+  // Add comments for values loaded from constant pool.
+  if (OutStreamer->isVerboseAsm())
+    addConstantComments(MI, *OutStreamer);
+
  // Add a comment about EVEX compression
  if (TM.Options.MCOptions.ShowMCEncoding) {
    if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_LEGACY)
@@ -2051,10 +2055,6 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
      OutStreamer->AddComment("EVEX TO EVEX Compression ", false);
  }

-  // Add comments for values loaded from constant pool.
-  if (OutStreamer->isVerboseAsm())
-    addConstantComments(MI, *OutStreamer);
-
  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    llvm_unreachable("Should be handled target independently");
144 changes: 72 additions & 72 deletions llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll

Large diffs are not rendered by default.

32 changes: 16 additions & 16 deletions llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -2156,17 +2156,17 @@ define <8 x i16>@test_int_x86_avx512_maskz_psrlv8_hi(<8 x i16> %x0, <8 x i16> %x
define <8 x i16> @test_int_x86_avx512_psrlv_w_128_const() optsize {
; X86-LABEL: test_int_x86_avx512_psrlv_w_128_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
-; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4,4,4,4,4,4,65535]
+; X86-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psrlv_w_128_const:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
-; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4,4,4,4,4,4,65535]
+; X64-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
@@ -2180,17 +2180,17 @@ declare <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16>, <8 x i16>)
define <16 x i16> @test_int_x86_avx512_psrlv_w_256_const() optsize {
; X86-LABEL: test_int_x86_avx512_psrlv_w_256_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
-; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
+; X86-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psrlv_w_256_const:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
-; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
+; X64-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
@@ -2400,17 +2400,17 @@ define <8 x i16>@test_int_x86_avx512_maskz_psllv8_hi(<8 x i16> %x0, <8 x i16> %x
define <8 x i16> @test_int_x86_avx512_psllv_w_128_const() optsize {
; X86-LABEL: test_int_x86_avx512_psllv_w_128_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
-; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4,4,4,4,4,4,65535]
+; X86-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psllv_w_128_const:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
-; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4,4,4,4,4,4,65535]
+; X64-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
@@ -2425,17 +2425,17 @@ declare <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16>, <8 x i16>)
define <16 x i16> @test_int_x86_avx512_psllv_w_256_const() optsize {
; X86-LABEL: test_int_x86_avx512_psllv_w_256_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
-; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
+; X86-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psllv_w_256_const:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
-; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
+; X64-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -7323,17 +7323,17 @@ define <8 x i32>@test_int_x86_avx512_maskz_psrav8_si(<8 x i32> %x0, <8 x i32> %x
define <8 x i32>@test_int_x86_avx512_mask_psrav8_si_const() {
; X86-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
-; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X86-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; X86-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 5, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
-; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
+; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; X64-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 5, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
@@ -8636,17 +8636,17 @@ define <2 x i64>@test_int_x86_avx512_maskz_psrav_q_128(<2 x i64> %x0, <2 x i64>
define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295]
-; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X86-NEXT: vmovdqa {{.*#+}} xmm0 = [2,0,4294967287,4294967295]
+; X86-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: vpsravq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
; X86-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,18446744073709551607]
-; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
+; X64-NEXT: vmovdqa {{.*#+}} xmm0 = [2,18446744073709551607]
+; X64-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
; X64-NEXT: # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -780,8 +780,8 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() {
;
; X86-AVX512-LABEL: test_x86_sse2_packssdw_128_fold:
; X86-AVX512: ## %bb.0:
-; X86-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
-; X86-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
@@ -801,8 +801,8 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() {
;
; X64-AVX512-LABEL: test_x86_sse2_packssdw_128_fold:
; X64-AVX512: ## %bb.0:
-; X64-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,32767,32767,65535,32768]
-; X64-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,32767,32767,65535,32768]
+; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
@@ -848,8 +848,8 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() {
;
; X86-AVX512-LABEL: test_x86_sse2_packsswb_128_fold:
; X86-AVX512: ## %bb.0:
-; X86-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; X86-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
@@ -869,8 +869,8 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() {
;
; X64-AVX512-LABEL: test_x86_sse2_packsswb_128_fold:
; X64-AVX512: ## %bb.0:
-; X64-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
-; X64-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
@@ -916,8 +916,8 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() {
;
; X86-AVX512-LABEL: test_x86_sse2_packuswb_128_fold:
; X86-AVX512: ## %bb.0:
-; X86-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; X86-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
@@ -937,8 +937,8 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() {
;
; X64-AVX512-LABEL: test_x86_sse2_packuswb_128_fold:
; X64-AVX512: ## %bb.0:
-; X64-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; X64-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/X86/sse41-intrinsics-x86.ll
@@ -201,8 +201,8 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() {
;
; X86-AVX512-LABEL: test_x86_sse41_packusdw_fold:
; X86-AVX512: ## %bb.0:
-; X86-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
-; X86-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
@@ -222,8 +222,8 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() {
;
; X64-AVX512-LABEL: test_x86_sse41_packusdw_fold:
; X64-AVX512: ## %bb.0:
-; X64-AVX512-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
-; X64-AVX512-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
+; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/X86/vec_fpext.ll
@@ -267,8 +267,8 @@ define <2 x double> @fpext_fromconst() {
;
; X86-AVX512VL-LABEL: fpext_fromconst:
; X86-AVX512VL: # %bb.0: # %entry
-; X86-AVX512VL-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
-; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
+; X86-AVX512VL-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
@@ -288,8 +288,8 @@ define <2 x double> @fpext_fromconst() {
;
; X64-AVX512VL-LABEL: fpext_fromconst:
; X64-AVX512VL: # %bb.0: # %entry
-; X64-AVX512VL-NEXT: vmovaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
-; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
+; X64-AVX512VL-NEXT: vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
+; X64-AVX512VL-NEXT: # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
entry:
