1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LOWER
3 # RUN: llc -mtriple aarch64 -O2 -start-before=aarch64-postlegalizer-lowering -stop-after=instruction-select -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SELECT
8 tracksRegLiveness: true
; All eight G_BUILD_VECTOR operands are the same register %r, so the
; post-legalizer lowering folds the build into a single G_DUP, which
; instruction selection then turns into DUPv8i8gpr.
12 ; LOWER-LABEL: name: same_reg
14 ; LOWER: %r:_(s8) = G_IMPLICIT_DEF
15 ; LOWER: %build_vector:_(<8 x s8>) = G_DUP %r(s8)
16 ; LOWER: $d0 = COPY %build_vector(<8 x s8>)
17 ; LOWER: RET_ReallyLR implicit $d0
18 ; SELECT-LABEL: name: same_reg
19 ; SELECT: liveins: $d0
20 ; SELECT: %r:gpr32 = IMPLICIT_DEF
21 ; SELECT: %build_vector:fpr64 = DUPv8i8gpr %r
22 ; SELECT: $d0 = COPY %build_vector
23 ; SELECT: RET_ReallyLR implicit $d0
24 %r:_(s8) = G_IMPLICIT_DEF
25 %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r, %r, %r, %r, %r, %r, %r, %r
26 $d0 = COPY %build_vector(<8 x s8>)
27 RET_ReallyLR implicit $d0
31 name: dont_combine_different_reg
33 tracksRegLiveness: true
36 liveins: $d0, $w0, $w1
; The two lanes come from different registers (%r and %q), so lowering
; must keep the G_BUILD_VECTOR rather than forming a G_DUP; selection
; assembles the vector with INSERT_SUBREG + INSvi32gpr instead.
37 ; LOWER-LABEL: name: dont_combine_different_reg
38 ; LOWER: liveins: $d0, $w0, $w1
39 ; LOWER: %r:_(s32) = COPY $w0
40 ; LOWER: %q:_(s32) = COPY $w1
41 ; LOWER: %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %r(s32), %q(s32)
42 ; LOWER: $d0 = COPY %build_vector(<2 x s32>)
43 ; LOWER: RET_ReallyLR implicit $d0
44 ; SELECT-LABEL: name: dont_combine_different_reg
45 ; SELECT: liveins: $d0, $w0, $w1
46 ; SELECT: %r:gpr32all = COPY $w0
47 ; SELECT: %q:gpr32 = COPY $w1
48 ; SELECT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
49 ; SELECT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %r, %subreg.ssub
50 ; SELECT: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, %q
51 ; SELECT: %build_vector:fpr64 = COPY [[INSvi32gpr]].dsub
52 ; SELECT: $d0 = COPY %build_vector
53 ; SELECT: RET_ReallyLR implicit $d0
56 %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %r, %q
57 $d0 = COPY %build_vector(<2 x s32>)
58 RET_ReallyLR implicit $d0
62 name: dont_combine_zero
64 tracksRegLiveness: true
68 ; Don't combine with 0. We want to avoid blocking immAllZerosV selection
; (the splat-of-zero is materialized by MOVIv2d_ns during selection below).
71 ; LOWER-LABEL: name: dont_combine_zero
73 ; LOWER: %r:_(s8) = G_CONSTANT i8 0
74 ; LOWER: %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8)
75 ; LOWER: $d0 = COPY %build_vector(<8 x s8>)
76 ; LOWER: RET_ReallyLR implicit $d0
77 ; SELECT-LABEL: name: dont_combine_zero
78 ; SELECT: liveins: $d0
79 ; SELECT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
80 ; SELECT: %build_vector:fpr64 = COPY [[MOVIv2d_ns]].dsub
81 ; SELECT: $d0 = COPY %build_vector
82 ; SELECT: RET_ReallyLR implicit $d0
83 %r:_(s8) = G_CONSTANT i8 0
84 %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r, %r, %r, %r, %r, %r, %r, %r
85 $d0 = COPY %build_vector(<8 x s8>)
86 RET_ReallyLR implicit $d0
90 name: dont_combine_all_ones
92 tracksRegLiveness: true
96 ; Don't combine with -1. We want to avoid blocking immAllOnesV selection
; (here the all-ones vector ends up loaded from a constant pool via ADRP + LDRDui).
99 ; LOWER-LABEL: name: dont_combine_all_ones
100 ; LOWER: liveins: $d0
101 ; LOWER: %r:_(s8) = G_CONSTANT i8 -1
102 ; LOWER: %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8)
103 ; LOWER: $d0 = COPY %build_vector(<8 x s8>)
104 ; LOWER: RET_ReallyLR implicit $d0
105 ; SELECT-LABEL: name: dont_combine_all_ones
106 ; SELECT: liveins: $d0
107 ; SELECT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
108 ; SELECT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0
109 ; SELECT: $d0 = COPY [[LDRDui]]
110 ; SELECT: RET_ReallyLR implicit $d0
111 %r:_(s8) = G_CONSTANT i8 -1
112 %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r, %r, %r, %r, %r, %r, %r, %r
113 $d0 = COPY %build_vector(<8 x s8>)
114 RET_ReallyLR implicit $d0
118 name: all_zeros_pat_example
120 tracksRegLiveness: true
124 ; We should get a NEGv2i32 here.
; G_SUB of %v from a splat-of-zero build-vector is a vector negate, so
; keeping the zero G_BUILD_VECTOR (instead of G_DUP) lets the immAllZerosV
; pattern match.
126 ; LOWER-LABEL: name: all_zeros_pat_example
127 ; LOWER: liveins: $d0
128 ; LOWER: %v:_(<2 x s32>) = COPY $d0
129 ; LOWER: %cst:_(s32) = G_CONSTANT i32 0
130 ; LOWER: %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst(s32), %cst(s32)
131 ; LOWER: %sub:_(<2 x s32>) = G_SUB %build_vector, %v
132 ; LOWER: $d0 = COPY %sub(<2 x s32>)
133 ; LOWER: RET_ReallyLR implicit $d0
134 ; SELECT-LABEL: name: all_zeros_pat_example
135 ; SELECT: liveins: $d0
136 ; SELECT: %v:fpr64 = COPY $d0
137 ; SELECT: %sub:fpr64 = NEGv2i32 %v
138 ; SELECT: $d0 = COPY %sub
139 ; SELECT: RET_ReallyLR implicit $d0
140 %v:_(<2 x s32>) = COPY $d0
141 %cst:_(s32) = G_CONSTANT i32 0
142 %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst, %cst
143 %sub:_(<2 x s32>) = G_SUB %build_vector, %v
144 $d0 = COPY %sub(<2 x s32>)
145 RET_ReallyLR implicit $d0
149 name: all_ones_pat_example
151 tracksRegLiveness: true
155 ; We should get a BICv8i8 here.
; G_XOR with an all-ones build-vector is a bitwise NOT, and AND-with-NOT
; matches BIC; keeping the -1 G_BUILD_VECTOR (instead of G_DUP) lets the
; immAllOnesV pattern match.
157 ; LOWER-LABEL: name: all_ones_pat_example
158 ; LOWER: liveins: $d0, $d1
159 ; LOWER: %v0:_(<2 x s32>) = COPY $d0
160 ; LOWER: %v1:_(<2 x s32>) = COPY $d1
161 ; LOWER: %cst:_(s32) = G_CONSTANT i32 -1
162 ; LOWER: %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst(s32), %cst(s32)
163 ; LOWER: %xor:_(<2 x s32>) = G_XOR %v0, %build_vector
164 ; LOWER: %and:_(<2 x s32>) = G_AND %v1, %xor
165 ; LOWER: $d0 = COPY %and(<2 x s32>)
166 ; LOWER: RET_ReallyLR implicit $d0
167 ; SELECT-LABEL: name: all_ones_pat_example
168 ; SELECT: liveins: $d0, $d1
169 ; SELECT: %v0:fpr64 = COPY $d0
170 ; SELECT: %v1:fpr64 = COPY $d1
171 ; SELECT: %and:fpr64 = BICv8i8 %v1, %v0
172 ; SELECT: $d0 = COPY %and
173 ; SELECT: RET_ReallyLR implicit $d0
174 %v0:_(<2 x s32>) = COPY $d0
175 %v1:_(<2 x s32>) = COPY $d1
176 %cst:_(s32) = G_CONSTANT i32 -1
177 %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst, %cst
178 %xor:_(<2 x s32>) = G_XOR %v0, %build_vector
179 %and:_(<2 x s32>) = G_AND %v1, %xor
180 $d0 = COPY %and(<2 x s32>)
181 RET_ReallyLR implicit $d0