1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
2 ; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s
4 target triple = "aarch64-linux"
; add_f16_vg1x2: checks that llvm.aarch64.sme.add.za16.vg1x2.nxv8f16 lowers to
; the multi-vector "fadd za.h[...], { z0.h, z1.h }" form, both for the base
; slice and for a slice+7 offset folded into the instruction's immediate.
; NOTE(review): no `ret`/closing `}` (or `; CHECK: // %bb.0:`) is visible in
; this chunk — the function body appears elided here; confirm in the full file.
6 define void @add_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #0 {
7 ; CHECK-LABEL: add_f16_vg1x2:
9 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
10 ; CHECK-NEXT: mov w8, w0
11 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
12 ; CHECK-NEXT: fadd za.h[w8, 0, vgx2], { z0.h, z1.h }
13 ; CHECK-NEXT: fadd za.h[w8, 7, vgx2], { z0.h, z1.h }
15 call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
16 %slice.7 = add i32 %slice, 7
17 call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
; add_f16_vg1x4: checks that llvm.aarch64.sme.add.za16.vg1x4.nxv8f16 lowers to
; "fadd za.h[...], { z0.h - z3.h }" (vgx4) for the base slice and slice+7.
; Uses attribute #1 (+sme-f8f16), exercising the second feature that enables
; this instruction, in contrast to the vg1x2 test's #0 (+sme-f16f16).
21 define void @add_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
22 ; CHECK-LABEL: add_f16_vg1x4:
24 ; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
25 ; CHECK-NEXT: mov w8, w0
26 ; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
27 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
28 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
29 ; CHECK-NEXT: fadd za.h[w8, 0, vgx4], { z0.h - z3.h }
30 ; CHECK-NEXT: fadd za.h[w8, 7, vgx4], { z0.h - z3.h }
32 <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #1 {
33 call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
34 <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
35 %slice.7 = add i32 %slice, 7
36 call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
37 <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
; sub_f16_vg1x2: checks that llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16 lowers to
; "fsub za.h[...], { z0.h, z1.h }" (vgx2) for the base slice and slice+7.
; Uses attribute #1 (+sme-f8f16) as the enabling feature for this variant.
41 define void @sub_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #1 {
42 ; CHECK-LABEL: sub_f16_vg1x2:
44 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
45 ; CHECK-NEXT: mov w8, w0
46 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
47 ; CHECK-NEXT: fsub za.h[w8, 0, vgx2], { z0.h, z1.h }
48 ; CHECK-NEXT: fsub za.h[w8, 7, vgx2], { z0.h, z1.h }
50 call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
51 %slice.7 = add i32 %slice, 7
52 call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
; sub_f16_vg1x4: checks that llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16 lowers to
; "fsub za.h[...], { z0.h - z3.h }" (vgx4) for the base slice and slice+7.
; Uses attribute #0 (+sme-f16f16), complementing sub_f16_vg1x2's #1 so both
; enabling features are covered across the f16 sub tests.
56 define void @sub_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
57 ; CHECK-LABEL: sub_f16_vg1x4:
59 ; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
60 ; CHECK-NEXT: mov w8, w0
61 ; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
62 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
63 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
64 ; CHECK-NEXT: fsub za.h[w8, 0, vgx4], { z0.h - z3.h }
65 ; CHECK-NEXT: fsub za.h[w8, 7, vgx4], { z0.h - z3.h }
67 <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #0 {
68 call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
69 <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
70 %slice.7 = add i32 %slice, 7
71 call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
72 <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
; add_bf16_vg1x2: bfloat variant — checks llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16
; lowers to "bfadd za.h[...], { z0.h, z1.h }" for the base slice and slice+7.
; Requires attribute #2 (+sme2,+bf16,+b16b16), the feature set for BFADD/BFSUB.
76 define void @add_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) #2 {
77 ; CHECK-LABEL: add_bf16_vg1x2:
79 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
80 ; CHECK-NEXT: mov w8, w0
81 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
82 ; CHECK-NEXT: bfadd za.h[w8, 0, vgx2], { z0.h, z1.h }
83 ; CHECK-NEXT: bfadd za.h[w8, 7, vgx2], { z0.h, z1.h }
85 call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
86 %slice.7 = add i32 %slice, 7
87 call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
; add_bf16_vg1x4: bfloat variant — checks llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16
; lowers to "bfadd za.h[...], { z0.h - z3.h }" (vgx4) for the base slice and slice+7.
; Requires attribute #2 (+sme2,+bf16,+b16b16).
91 define void @add_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
92 ; CHECK-LABEL: add_bf16_vg1x4:
94 ; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
95 ; CHECK-NEXT: mov w8, w0
96 ; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
97 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
98 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
99 ; CHECK-NEXT: bfadd za.h[w8, 0, vgx4], { z0.h - z3.h }
100 ; CHECK-NEXT: bfadd za.h[w8, 7, vgx4], { z0.h - z3.h }
102 <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #2 {
103 call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
104 <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
105 %slice.7 = add i32 %slice, 7
106 call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
107 <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
; sub_bf16_vg1x2: bfloat variant — checks llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16
; lowers to "bfsub za.h[...], { z0.h, z1.h }" for the base slice and slice+7.
; Requires attribute #2 (+sme2,+bf16,+b16b16).
111 define void @sub_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) #2 {
112 ; CHECK-LABEL: sub_bf16_vg1x2:
114 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
115 ; CHECK-NEXT: mov w8, w0
116 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
117 ; CHECK-NEXT: bfsub za.h[w8, 0, vgx2], { z0.h, z1.h }
118 ; CHECK-NEXT: bfsub za.h[w8, 7, vgx2], { z0.h, z1.h }
120 call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
121 %slice.7 = add i32 %slice, 7
122 call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
; sub_bf16_vg1x4: bfloat variant — checks llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16
; lowers to "bfsub za.h[...], { z0.h - z3.h }" (vgx4) for the base slice and slice+7.
; Requires attribute #2 (+sme2,+bf16,+b16b16).
126 define void @sub_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
127 ; CHECK-LABEL: sub_bf16_vg1x4:
129 ; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
130 ; CHECK-NEXT: mov w8, w0
131 ; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
132 ; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
133 ; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
134 ; CHECK-NEXT: bfsub za.h[w8, 0, vgx4], { z0.h - z3.h }
135 ; CHECK-NEXT: bfsub za.h[w8, 7, vgx4], { z0.h - z3.h }
137 <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #2 {
138 call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
139 <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
140 %slice.7 = add i32 %slice, 7
141 call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
142 <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
146 attributes #0 = { nounwind "target-features"="+sme-f16f16" }
147 attributes #1 = { nounwind "target-features"="+sme-f8f16" }
148 attributes #2 = { nounwind "target-features"="+sme2,+bf16,+b16b16" }