1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod < %s | FileCheck %s
4 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
5 declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
; Multiply of two zext'ed <8 x i8> loads, reduced with vector.reduce.add:
; should select a single UDOT into a zeroed .2s accumulator, then a
; pairwise add (addp) to produce the scalar sum.
7 define i32 @test_udot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
8 ; CHECK-LABEL: test_udot_v8i8:
9 ; CHECK: // %bb.0: // %entry
10 ; CHECK-NEXT: ldr d0, [x0]
11 ; CHECK-NEXT: ldr d1, [x1]
12 ; CHECK-NEXT: movi v2.2d, #0000000000000000
13 ; CHECK-NEXT: udot v2.2s, v1.8b, v0.8b
14 ; CHECK-NEXT: addp v0.2s, v2.2s, v2.2s
15 ; CHECK-NEXT: fmov w0, s0
18 %0 = bitcast i8* %a to <8 x i8>*
19 %1 = load <8 x i8>, <8 x i8>* %0
20 %2 = zext <8 x i8> %1 to <8 x i32>
21 %3 = bitcast i8* %b to <8 x i8>*
22 %4 = load <8 x i8>, <8 x i8>* %3
23 %5 = zext <8 x i8> %4 to <8 x i32>
24 %6 = mul nuw nsw <8 x i32> %5, %2
25 %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
; Add-reduction of a bare zext (no multiply in the IR): should still use
; UDOT by synthesizing an all-ones byte vector (movi #1) as the second
; multiplicand, i.e. sum(x * 1) == sum(x).
29 define i32 @test_udot_v8i8_nomla(i8* nocapture readonly %a1) {
30 ; CHECK-LABEL: test_udot_v8i8_nomla:
31 ; CHECK: // %bb.0: // %entry
32 ; CHECK-NEXT: ldr d0, [x0]
33 ; CHECK-NEXT: movi v1.2d, #0000000000000000
34 ; CHECK-NEXT: movi v2.8b, #1
35 ; CHECK-NEXT: udot v1.2s, v0.8b, v2.8b
36 ; CHECK-NEXT: addp v0.2s, v1.2s, v1.2s
37 ; CHECK-NEXT: fmov w0, s0
40 %0 = bitcast i8* %a1 to <8 x i8>*
41 %1 = load <8 x i8>, <8 x i8>* %0
42 %2 = zext <8 x i8> %1 to <8 x i32>
43 %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
; Signed counterpart of test_udot_v8i8: sext instead of zext on both
; operands, so the multiply-reduce should select SDOT rather than UDOT.
47 define i32 @test_sdot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
48 ; CHECK-LABEL: test_sdot_v8i8:
49 ; CHECK: // %bb.0: // %entry
50 ; CHECK-NEXT: ldr d0, [x0]
51 ; CHECK-NEXT: ldr d1, [x1]
52 ; CHECK-NEXT: movi v2.2d, #0000000000000000
53 ; CHECK-NEXT: sdot v2.2s, v1.8b, v0.8b
54 ; CHECK-NEXT: addp v0.2s, v2.2s, v2.2s
55 ; CHECK-NEXT: fmov w0, s0
58 %0 = bitcast i8* %a to <8 x i8>*
59 %1 = load <8 x i8>, <8 x i8>* %0
60 %2 = sext <8 x i8> %1 to <8 x i32>
61 %3 = bitcast i8* %b to <8 x i8>*
62 %4 = load <8 x i8>, <8 x i8>* %3
63 %5 = sext <8 x i8> %4 to <8 x i32>
64 %6 = mul nsw <8 x i32> %5, %2
65 %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
; Add-reduction of a bare sext (no multiply): lowered as SDOT against an
; all-ones byte vector, since sum(sext(x) * 1) == sum(sext(x)).
69 define i32 @test_sdot_v8i8_nomla(i8* nocapture readonly %a1) {
70 ; CHECK-LABEL: test_sdot_v8i8_nomla:
71 ; CHECK: // %bb.0: // %entry
72 ; CHECK-NEXT: ldr d0, [x0]
73 ; CHECK-NEXT: movi v1.2d, #0000000000000000
74 ; CHECK-NEXT: movi v2.8b, #1
75 ; CHECK-NEXT: sdot v1.2s, v0.8b, v2.8b
76 ; CHECK-NEXT: addp v0.2s, v1.2s, v1.2s
77 ; CHECK-NEXT: fmov w0, s0
80 %0 = bitcast i8* %a1 to <8 x i8>*
81 %1 = load <8 x i8>, <8 x i8>* %0
82 %2 = sext <8 x i8> %1 to <8 x i32>
83 %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
; 128-bit variant: <16 x i8> inputs use the .4s accumulator form of UDOT
; and an addv across-vector reduction. The extra scalar %sum operand is
; folded in with a plain add after the fmov.
88 define i32 @test_udot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
89 ; CHECK-LABEL: test_udot_v16i8:
90 ; CHECK: // %bb.0: // %entry
91 ; CHECK-NEXT: ldr q0, [x0]
92 ; CHECK-NEXT: ldr q1, [x1]
93 ; CHECK-NEXT: movi v2.2d, #0000000000000000
94 ; CHECK-NEXT: udot v2.4s, v1.16b, v0.16b
95 ; CHECK-NEXT: addv s0, v2.4s
96 ; CHECK-NEXT: fmov w8, s0
97 ; CHECK-NEXT: add w0, w8, w2
100 %0 = bitcast i8* %a to <16 x i8>*
101 %1 = load <16 x i8>, <16 x i8>* %0
102 %2 = zext <16 x i8> %1 to <16 x i32>
103 %3 = bitcast i8* %b to <16 x i8>*
104 %4 = load <16 x i8>, <16 x i8>* %3
105 %5 = zext <16 x i8> %4 to <16 x i32>
106 %6 = mul nuw nsw <16 x i32> %5, %2
107 %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
108 %op.extra = add i32 %7, %sum
; 128-bit no-multiply variant: reduction of zext <16 x i8> becomes UDOT
; against an all-ones .16b vector, accumulated in .4s, reduced with addv.
112 define i32 @test_udot_v16i8_nomla(i8* nocapture readonly %a1) {
113 ; CHECK-LABEL: test_udot_v16i8_nomla:
114 ; CHECK: // %bb.0: // %entry
115 ; CHECK-NEXT: ldr q0, [x0]
116 ; CHECK-NEXT: movi v1.16b, #1
117 ; CHECK-NEXT: movi v2.2d, #0000000000000000
118 ; CHECK-NEXT: udot v2.4s, v0.16b, v1.16b
119 ; CHECK-NEXT: addv s0, v2.4s
120 ; CHECK-NEXT: fmov w0, s0
123 %0 = bitcast i8* %a1 to <16 x i8>*
124 %1 = load <16 x i8>, <16 x i8>* %0
125 %2 = zext <16 x i8> %1 to <16 x i32>
126 %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
; Signed 128-bit variant: sext multiply-reduce selects SDOT (.4s form),
; addv reduction, then the scalar %sum is added after the fmov.
130 define i32 @test_sdot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
131 ; CHECK-LABEL: test_sdot_v16i8:
132 ; CHECK: // %bb.0: // %entry
133 ; CHECK-NEXT: ldr q0, [x0]
134 ; CHECK-NEXT: ldr q1, [x1]
135 ; CHECK-NEXT: movi v2.2d, #0000000000000000
136 ; CHECK-NEXT: sdot v2.4s, v1.16b, v0.16b
137 ; CHECK-NEXT: addv s0, v2.4s
138 ; CHECK-NEXT: fmov w8, s0
139 ; CHECK-NEXT: add w0, w8, w2
142 %0 = bitcast i8* %a to <16 x i8>*
143 %1 = load <16 x i8>, <16 x i8>* %0
144 %2 = sext <16 x i8> %1 to <16 x i32>
145 %3 = bitcast i8* %b to <16 x i8>*
146 %4 = load <16 x i8>, <16 x i8>* %3
147 %5 = sext <16 x i8> %4 to <16 x i32>
148 %6 = mul nsw <16 x i32> %5, %2
149 %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
150 %op.extra = add nsw i32 %7, %sum
; Signed 128-bit no-multiply variant: reduction of a sext <16 x i8> is
; lowered as SDOT against an all-ones .16b vector plus an addv.
154 define i32 @test_sdot_v16i8_nomla(i8* nocapture readonly %a1) {
155 ; CHECK-LABEL: test_sdot_v16i8_nomla:
156 ; CHECK: // %bb.0: // %entry
157 ; CHECK-NEXT: ldr q0, [x0]
158 ; CHECK-NEXT: movi v1.16b, #1
159 ; CHECK-NEXT: movi v2.2d, #0000000000000000
160 ; CHECK-NEXT: sdot v2.4s, v0.16b, v1.16b
161 ; CHECK-NEXT: addv s0, v2.4s
162 ; CHECK-NEXT: fmov w0, s0
165 %0 = bitcast i8* %a1 to <16 x i8>*
166 %1 = load <16 x i8>, <16 x i8>* %0
167 %2 = sext <16 x i8> %1 to <16 x i32>
168 %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
; Two independent zext multiply-reductions whose results are added:
; both should chain into the SAME accumulator register via two UDOTs,
; so the final scalar add disappears into the dot accumulation.
173 define i32 @test_udot_v8i8_double(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
174 ; CHECK-LABEL: test_udot_v8i8_double:
175 ; CHECK: // %bb.0: // %entry
176 ; CHECK-NEXT: movi v4.2d, #0000000000000000
177 ; CHECK-NEXT: udot v4.2s, v2.8b, v3.8b
178 ; CHECK-NEXT: udot v4.2s, v0.8b, v1.8b
179 ; CHECK-NEXT: addp v0.2s, v4.2s, v4.2s
180 ; CHECK-NEXT: fmov w0, s0
183 %az = zext <8 x i8> %a to <8 x i32>
184 %bz = zext <8 x i8> %b to <8 x i32>
185 %m1 = mul nuw nsw <8 x i32> %az, %bz
186 %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m1)
187 %cz = zext <8 x i8> %c to <8 x i32>
188 %dz = zext <8 x i8> %d to <8 x i32>
189 %m2 = mul nuw nsw <8 x i32> %cz, %dz
190 %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m2)
191 %x = add i32 %r1, %r2
; Two plain zext reductions added together (operands %b and %d unused):
; both fold into one accumulator via two UDOTs sharing a single all-ones
; multiplicand vector.
195 define i32 @test_udot_v8i8_double_nomla(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
196 ; CHECK-LABEL: test_udot_v8i8_double_nomla:
197 ; CHECK: // %bb.0: // %entry
198 ; CHECK-NEXT: movi v1.2d, #0000000000000000
199 ; CHECK-NEXT: movi v3.8b, #1
200 ; CHECK-NEXT: udot v1.2s, v2.8b, v3.8b
201 ; CHECK-NEXT: udot v1.2s, v0.8b, v3.8b
202 ; CHECK-NEXT: addp v0.2s, v1.2s, v1.2s
203 ; CHECK-NEXT: fmov w0, s0
206 %az = zext <8 x i8> %a to <8 x i32>
207 %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %az)
208 %cz = zext <8 x i8> %c to <8 x i32>
209 %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %cz)
210 %x = add i32 %r1, %r2
; 128-bit double variant: two zext multiply-reductions chained into one
; .4s accumulator with two UDOTs, then a single addv reduction.
214 define i32 @test_udot_v16i8_double(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
215 ; CHECK-LABEL: test_udot_v16i8_double:
216 ; CHECK: // %bb.0: // %entry
217 ; CHECK-NEXT: movi v4.2d, #0000000000000000
218 ; CHECK-NEXT: udot v4.4s, v2.16b, v3.16b
219 ; CHECK-NEXT: udot v4.4s, v0.16b, v1.16b
220 ; CHECK-NEXT: addv s0, v4.4s
221 ; CHECK-NEXT: fmov w0, s0
224 %az = zext <16 x i8> %a to <16 x i32>
225 %bz = zext <16 x i8> %b to <16 x i32>
226 %m1 = mul nuw nsw <16 x i32> %az, %bz
227 %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m1)
228 %cz = zext <16 x i8> %c to <16 x i32>
229 %dz = zext <16 x i8> %d to <16 x i32>
230 %m2 = mul nuw nsw <16 x i32> %cz, %dz
231 %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m2)
232 %x = add i32 %r1, %r2
; 128-bit no-multiply double variant (%b and %d unused): two UDOTs
; against a shared all-ones .16b vector into one accumulator, then addv.
236 define i32 @test_udot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
237 ; CHECK-LABEL: test_udot_v16i8_double_nomla:
238 ; CHECK: // %bb.0: // %entry
239 ; CHECK-NEXT: movi v1.16b, #1
240 ; CHECK-NEXT: movi v3.2d, #0000000000000000
241 ; CHECK-NEXT: udot v3.4s, v2.16b, v1.16b
242 ; CHECK-NEXT: udot v3.4s, v0.16b, v1.16b
243 ; CHECK-NEXT: addv s0, v3.4s
244 ; CHECK-NEXT: fmov w0, s0
247 %az = zext <16 x i8> %a to <16 x i32>
248 %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %az)
249 %cz = zext <16 x i8> %c to <16 x i32>
250 %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %cz)
251 %x = add i32 %r1, %r2
; Signed double variant: two sext multiply-reductions chained into one
; .2s accumulator with two SDOTs, reduced with a pairwise add.
255 define i32 @test_sdot_v8i8_double(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
256 ; CHECK-LABEL: test_sdot_v8i8_double:
257 ; CHECK: // %bb.0: // %entry
258 ; CHECK-NEXT: movi v4.2d, #0000000000000000
259 ; CHECK-NEXT: sdot v4.2s, v2.8b, v3.8b
260 ; CHECK-NEXT: sdot v4.2s, v0.8b, v1.8b
261 ; CHECK-NEXT: addp v0.2s, v4.2s, v4.2s
262 ; CHECK-NEXT: fmov w0, s0
265 %az = sext <8 x i8> %a to <8 x i32>
266 %bz = sext <8 x i8> %b to <8 x i32>
267 %m1 = mul nuw nsw <8 x i32> %az, %bz
268 %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m1)
269 %cz = sext <8 x i8> %c to <8 x i32>
270 %dz = sext <8 x i8> %d to <8 x i32>
271 %m2 = mul nuw nsw <8 x i32> %cz, %dz
272 %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m2)
273 %x = add i32 %r1, %r2
; Signed no-multiply double variant (%b and %d unused): two SDOTs against
; a shared all-ones .8b vector into one accumulator, then pairwise add.
277 define i32 @test_sdot_v8i8_double_nomla(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
278 ; CHECK-LABEL: test_sdot_v8i8_double_nomla:
279 ; CHECK: // %bb.0: // %entry
280 ; CHECK-NEXT: movi v1.2d, #0000000000000000
281 ; CHECK-NEXT: movi v3.8b, #1
282 ; CHECK-NEXT: sdot v1.2s, v2.8b, v3.8b
283 ; CHECK-NEXT: sdot v1.2s, v0.8b, v3.8b
284 ; CHECK-NEXT: addp v0.2s, v1.2s, v1.2s
285 ; CHECK-NEXT: fmov w0, s0
288 %az = sext <8 x i8> %a to <8 x i32>
289 %r1 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %az)
290 %cz = sext <8 x i8> %c to <8 x i32>
291 %r2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %cz)
292 %x = add i32 %r1, %r2
; Signed 128-bit double variant: two sext multiply-reductions chained
; into one .4s accumulator with two SDOTs, then a single addv reduction.
296 define i32 @test_sdot_v16i8_double(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
297 ; CHECK-LABEL: test_sdot_v16i8_double:
298 ; CHECK: // %bb.0: // %entry
299 ; CHECK-NEXT: movi v4.2d, #0000000000000000
300 ; CHECK-NEXT: sdot v4.4s, v2.16b, v3.16b
301 ; CHECK-NEXT: sdot v4.4s, v0.16b, v1.16b
302 ; CHECK-NEXT: addv s0, v4.4s
303 ; CHECK-NEXT: fmov w0, s0
306 %az = sext <16 x i8> %a to <16 x i32>
307 %bz = sext <16 x i8> %b to <16 x i32>
308 %m1 = mul nuw nsw <16 x i32> %az, %bz
309 %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m1)
310 %cz = sext <16 x i8> %c to <16 x i32>
311 %dz = sext <16 x i8> %d to <16 x i32>
312 %m2 = mul nuw nsw <16 x i32> %cz, %dz
313 %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m2)
314 %x = add i32 %r1, %r2
; Signed 128-bit no-multiply double variant (%b and %d unused): two SDOTs
; against a shared all-ones .16b vector into one accumulator, then addv.
318 define i32 @test_sdot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
319 ; CHECK-LABEL: test_sdot_v16i8_double_nomla:
320 ; CHECK: // %bb.0: // %entry
321 ; CHECK-NEXT: movi v1.16b, #1
322 ; CHECK-NEXT: movi v3.2d, #0000000000000000
323 ; CHECK-NEXT: sdot v3.4s, v2.16b, v1.16b
324 ; CHECK-NEXT: sdot v3.4s, v0.16b, v1.16b
325 ; CHECK-NEXT: addv s0, v3.4s
326 ; CHECK-NEXT: fmov w0, s0
329 %az = sext <16 x i8> %a to <16 x i32>
330 %r1 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %az)
331 %cz = sext <16 x i8> %c to <16 x i32>
332 %r2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %cz)
333 %x = add i32 %r1, %r2