# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=aarch64 -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
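# Checks legalization of G_VECREDUCE_FADD: reductions over already-legal vector
# types are kept as a single instruction, while wider vectors are split first.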
---
name:            fadd_v2s32
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $d0

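    ; The <2 x s32> reduction is expected to survive legalization as a single
    ; G_VECREDUCE_FADD.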
    ; CHECK-LABEL: name: fadd_v2s32
    ; CHECK: liveins: $d0
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s32) = G_VECREDUCE_FADD [[COPY]](<2 x s32>)
    ; CHECK: $w0 = COPY [[VECREDUCE_FADD]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(s32) = G_VECREDUCE_FADD %0(<2 x s32>)
    $w0 = COPY %1(s32)
    RET_ReallyLR implicit $w0

...
---
name:            fadd_v2s64
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $q0

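    ; Same as above for <2 x s64>: no splitting is expected.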
    ; CHECK-LABEL: name: fadd_v2s64
    ; CHECK: liveins: $q0
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[COPY]](<2 x s64>)
    ; CHECK: $x0 = COPY [[VECREDUCE_FADD]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
    %0:_(<2 x s64>) = COPY $q0
    %2:_(s64) = G_VECREDUCE_FADD %0(<2 x s64>)
    $x0 = COPY %2(s64)
    RET_ReallyLR implicit $x0

...
---
name:            fadd_v8s64
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $q0, $q1, $q2, $q3
    ; This is a power-of-2 legalization, so use a tree reduction.
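    ; The <8 x s64> source is split into <2 x s64> pieces which are combined
    ; pairwise with G_FADD before the final G_VECREDUCE_FADD.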
    ; CHECK-LABEL: name: fadd_v8s64
    ; CHECK: liveins: $q0, $q1, $q2, $q3
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
    ; CHECK: [[FADD1:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY2]], [[COPY3]]
    ; CHECK: [[FADD2:%[0-9]+]]:_(<2 x s64>) = G_FADD [[FADD]], [[FADD1]]
    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[FADD2]](<2 x s64>)
    ; CHECK: $x0 = COPY [[VECREDUCE_FADD]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = COPY $q2
    %3:_(<2 x s64>) = COPY $q3
    %4:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
    %5:_(<4 x s64>) = G_CONCAT_VECTORS %2(<2 x s64>), %3(<2 x s64>)
    %6:_(<8 x s64>) = G_CONCAT_VECTORS %4(<4 x s64>), %5(<4 x s64>)
    %7:_(s64) = G_VECREDUCE_FADD %6(<8 x s64>)
    $x0 = COPY %7(s64)
    RET_ReallyLR implicit $x0

...