; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X86

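; Check bfloat vector load/store lowering: unaligned (align 1) accesses use
; vmovups and 32-byte-aligned accesses use vmovaps, for both 128-bit (xmm) and
; 256-bit (ymm) vectors; vzeroupper is emitted before returning after the
; 256-bit operations.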
define dso_local void @funbf16(ptr readonly %src, ptr writeonly %dst) {
; X64-LABEL: funbf16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vmovups (%rdi), %xmm0
; X64-NEXT:    vmovups %xmm0, (%rsi)
; X64-NEXT:    vmovaps (%rdi), %xmm0
; X64-NEXT:    vmovaps %xmm0, (%rsi)
; X64-NEXT:    vmovups (%rdi), %ymm0
; X64-NEXT:    vmovups %ymm0, (%rsi)
; X64-NEXT:    vmovaps (%rdi), %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
;
; X86-LABEL: funbf16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovups (%ecx), %xmm0
; X86-NEXT:    vmovups %xmm0, (%eax)
; X86-NEXT:    vmovaps (%ecx), %xmm0
; X86-NEXT:    vmovaps %xmm0, (%eax)
; X86-NEXT:    vmovups (%ecx), %ymm0
; X86-NEXT:    vmovups %ymm0, (%eax)
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vmovaps %ymm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
entry:
  %0 = load <8 x bfloat>, ptr %src, align 1
  store <8 x bfloat> %0, ptr %dst, align 1
  %1 = load <8 x bfloat>, ptr %src, align 32
  store <8 x bfloat> %1, ptr %dst, align 32
  %2 = load <16 x bfloat>, ptr %src, align 1
  store <16 x bfloat> %2, ptr %dst, align 1
  %3 = load <16 x bfloat>, ptr %src, align 32
  store <16 x bfloat> %3, ptr %dst, align 32
  ret void
}