[X86] Support load/store for bf16 in avx
Reviewed By: LuoYuanke
Differential Revision: https://reviews.llvm.org/D144163
xiangzh1 committed Feb 16, 2023
Commit 96df79a (parent 7e6e636)
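
These patterns reuse the existing single-precision moves (VMOVAPS/VMOVUPS and their 256-bit Y forms) for bf16 vectors: a full-register move is a bitwise copy, so the element type does not matter. As a minimal sketch of what the patch enables (the function name and the plain-AVX invocation are illustrative assumptions, not taken from the commit), IR like the following should now select vmovaps directly:

; Illustrative reproducer, not part of the commit; assumes a built llc:
;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx reproducer.ll
define void @copy_v8bf16(ptr %src, ptr %dst) {
  %v = load <8 x bfloat>, ptr %src, align 16   ; aligned 128-bit load -> vmovaps
  store <8 x bfloat> %v, ptr %dst, align 16    ; aligned 128-bit store -> vmovaps
  ret void
}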
Showing 2 changed files with 60 additions and 0 deletions.
llvm/lib/Target/X86/X86InstrSSE.td (17 additions, 0 deletions)
@@ -577,20 +577,37 @@ let Predicates = [HasAVX, NoVLX] in {

  def : Pat<(alignedloadv8f16 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(alignedloadv8bf16 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv8f16 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(loadv8bf16 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(alignedstore (v8f16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8bf16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8f16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8bf16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  def : Pat<(alignedloadv16f16 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(alignedloadv16bf16 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv16f16 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(loadv16bf16 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore (v16f16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v16bf16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16f16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16bf16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
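
The comment above explains why the PS moves also serve integer vectors; the bf16 patterns lean on the same property, since a plain vector move never interprets its elements. A minimal 256-bit sketch under the same assumptions as above (illustrative name, not from the commit):

; Unaligned 256-bit case: with the new patterns this should select
; the Y-register form, vmovups.
define void @copy_v16bf16(ptr %src, ptr %dst) {
  %v = load <16 x bfloat>, ptr %src, align 1   ; -> vmovups (%rdi), %ymm0
  store <16 x bfloat> %v, ptr %dst, align 1    ; -> vmovups %ymm0, (%rsi)
  ret void
}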
llvm/test/CodeGen/X86/avx512bf16-mov.ll (new file, 43 additions)
@@ -0,0 +1,43 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X86

define dso_local void @funbf16(ptr readonly %src, ptr writeonly %dst) {
; X64-LABEL: funbf16:
; X64: # %bb.0: # %entry
; X64-NEXT: vmovups (%rdi), %xmm0
; X64-NEXT: vmovups %xmm0, (%rsi)
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vmovaps %xmm0, (%rsi)
; X64-NEXT: vmovups (%rdi), %ymm0
; X64-NEXT: vmovups %ymm0, (%rsi)
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vmovaps %ymm0, (%rsi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: funbf16:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovups (%ecx), %xmm0
; X86-NEXT: vmovups %xmm0, (%eax)
; X86-NEXT: vmovaps (%ecx), %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: vmovups (%ecx), %ymm0
; X86-NEXT: vmovups %ymm0, (%eax)
; X86-NEXT: vmovaps (%ecx), %ymm0
; X86-NEXT: vmovaps %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: retl
entry:
  %0 = load <8 x bfloat>, ptr %src, align 1
  store <8 x bfloat> %0, ptr %dst, align 1
  %1 = load <8 x bfloat>, ptr %src, align 32
  store <8 x bfloat> %1, ptr %dst, align 32
  %2 = load <16 x bfloat>, ptr %src, align 1
  store <16 x bfloat> %2, ptr %dst, align 1
  %3 = load <16 x bfloat>, ptr %src, align 32
  store <16 x bfloat> %3, ptr %dst, align 32
  ret void
}
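
The CHECK lines in this test were generated mechanically, as the NOTE at the top says. If the IR is changed, they can be regenerated with the same script (assuming a built llc is on PATH, or passed via --llc-binary):

llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/avx512bf16-mov.ll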
