; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names \
; RUN:   -ppc-vsr-nums-as-vr < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names \
; RUN:   -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE

declare <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>)
declare <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
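
; Check that the MMA accumulator assembled by @llvm.ppc.mma.assemble.acc and
; updated by @llvm.ppc.mma.xvf16ger2pp, together with its vector operands, is
; spilled and reloaded around the call to @foo, and that the final accumulator
; is stored through %ptr.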
define void @intrinsics1(<16 x i8> %vc1, <16 x i8> %vc2, <16 x i8> %vc3, <16 x i8> %vc4, i8* %ptr) {
; CHECK-LABEL: intrinsics1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    mflr r0
; CHECK-NEXT:    .cfi_def_cfa_offset 176
; CHECK-NEXT:    .cfi_offset lr, 16
; CHECK-NEXT:    .cfi_offset r30, -16
; CHECK-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-NEXT:    std r0, 16(r1)
; CHECK-NEXT:    stdu r1, -176(r1)
; CHECK-NEXT:    # kill: def $v5 killed $v5 killed $vsrp18 def $vsrp18
; CHECK-NEXT:    # kill: def $v4 killed $v4 killed $vsrp18 def $vsrp18
; CHECK-NEXT:    # kill: def $v3 killed $v3 killed $vsrp17 def $vsrp17
; CHECK-NEXT:    # kill: def $v2 killed $v2 killed $vsrp17 def $vsrp17
; CHECK-NEXT:    xxlor vs0, v2, v2
; CHECK-NEXT:    xxlor vs1, v3, v3
; CHECK-NEXT:    stxvp vsp34, 128(r1) # 32-byte Folded Spill
; CHECK-NEXT:    ld r30, 272(r1)
; CHECK-NEXT:    stxvp vsp36, 96(r1) # 32-byte Folded Spill
; CHECK-NEXT:    xxlor vs2, v4, v4
; CHECK-NEXT:    xxlor vs3, v5, v5
; CHECK-NEXT:    xxmtacc acc0
; CHECK-NEXT:    xvf16ger2pp acc0, v2, v4
; CHECK-NEXT:    xxmfacc acc0
; CHECK-NEXT:    stxvp vsp0, 64(r1)
; CHECK-NEXT:    stxvp vsp2, 32(r1)
; CHECK-NEXT:    bl foo@notoc
; CHECK-NEXT:    lxvp vsp0, 64(r1)
; CHECK-NEXT:    lxvp vsp2, 32(r1)
; CHECK-NEXT:    lxvp vsp34, 128(r1) # 32-byte Folded Reload
; CHECK-NEXT:    lxvp vsp36, 96(r1) # 32-byte Folded Reload
; CHECK-NEXT:    xxmtacc acc0
; CHECK-NEXT:    xvf16ger2pp acc0, v2, v4
; CHECK-NEXT:    xxmfacc acc0
; CHECK-NEXT:    stxv vs0, 48(r30)
; CHECK-NEXT:    stxv vs1, 32(r30)
; CHECK-NEXT:    stxv vs2, 16(r30)
; CHECK-NEXT:    stxv vs3, 0(r30)
; CHECK-NEXT:    addi r1, r1, 176
; CHECK-NEXT:    ld r0, 16(r1)
; CHECK-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-NEXT:    mtlr r0
; CHECK-NEXT:    blr
;
; CHECK-BE-LABEL: intrinsics1:
; CHECK-BE:       # %bb.0:
; CHECK-BE-NEXT:    mflr r0
; CHECK-BE-NEXT:    std r0, 16(r1)
; CHECK-BE-NEXT:    stdu r1, -256(r1)
; CHECK-BE-NEXT:    .cfi_def_cfa_offset 256
; CHECK-BE-NEXT:    .cfi_offset lr, 16
; CHECK-BE-NEXT:    .cfi_offset r30, -16
; CHECK-BE-NEXT:    std r30, 240(r1) # 8-byte Folded Spill
; CHECK-BE-NEXT:    # kill: def $v5 killed $v5 killed $vsrp18 def $vsrp18
; CHECK-BE-NEXT:    # kill: def $v4 killed $v4 killed $vsrp18 def $vsrp18
; CHECK-BE-NEXT:    # kill: def $v3 killed $v3 killed $vsrp17 def $vsrp17
; CHECK-BE-NEXT:    # kill: def $v2 killed $v2 killed $vsrp17 def $vsrp17
; CHECK-BE-NEXT:    xxlor vs0, v2, v2
; CHECK-BE-NEXT:    xxlor vs1, v3, v3
; CHECK-BE-NEXT:    stxvp vsp34, 208(r1) # 32-byte Folded Spill
; CHECK-BE-NEXT:    ld r30, 368(r1)
; CHECK-BE-NEXT:    xxlor vs2, v4, v4
; CHECK-BE-NEXT:    xxlor vs3, v5, v5
; CHECK-BE-NEXT:    stxvp vsp36, 176(r1) # 32-byte Folded Spill
; CHECK-BE-NEXT:    xxmtacc acc0
; CHECK-BE-NEXT:    xvf16ger2pp acc0, v2, v4
; CHECK-BE-NEXT:    xxmfacc acc0
; CHECK-BE-NEXT:    stxvp vsp0, 112(r1)
; CHECK-BE-NEXT:    stxvp vsp2, 144(r1)
; CHECK-BE-NEXT:    bl foo
; CHECK-BE-NEXT:    nop
; CHECK-BE-NEXT:    lxvp vsp0, 112(r1)
; CHECK-BE-NEXT:    lxvp vsp2, 144(r1)
; CHECK-BE-NEXT:    lxvp vsp34, 208(r1) # 32-byte Folded Reload
; CHECK-BE-NEXT:    lxvp vsp36, 176(r1) # 32-byte Folded Reload
; CHECK-BE-NEXT:    xxmtacc acc0
; CHECK-BE-NEXT:    xvf16ger2pp acc0, v2, v4
; CHECK-BE-NEXT:    xxmfacc acc0
; CHECK-BE-NEXT:    stxv vs1, 16(r30)
; CHECK-BE-NEXT:    stxv vs0, 0(r30)
; CHECK-BE-NEXT:    stxv vs3, 48(r30)
; CHECK-BE-NEXT:    stxv vs2, 32(r30)
; CHECK-BE-NEXT:    ld r30, 240(r1) # 8-byte Folded Reload
; CHECK-BE-NEXT:    addi r1, r1, 256
; CHECK-BE-NEXT:    ld r0, 16(r1)
; CHECK-BE-NEXT:    mtlr r0
; CHECK-BE-NEXT:    blr
  %1 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %vc1, <16 x i8> %vc2, <16 x i8> %vc3, <16 x i8> %vc4)
  %2 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1> %1, <16 x i8> %vc1, <16 x i8> %vc3)
  tail call void @foo()
  %3 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1> %2, <16 x i8> %vc1, <16 x i8> %vc3)
  %4 = bitcast i8* %ptr to <512 x i1>*
  store <512 x i1> %3, <512 x i1>* %4, align 64