# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=legalizer -global-isel-abort=1 %s -o - | FileCheck %s
---
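# Check that an s8 G_MUL, which is illegal on AArch64, is widened to an s32
# multiply surrounded by truncs and an anyext.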
name:            test_scalar_mul_small
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_scalar_mul_small
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[MUL]](s32)
    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s8) = G_TRUNC %0(s64)
    %3:_(s8) = G_TRUNC %1(s64)
    %4:_(s8) = G_MUL %2, %3
    %5:_(s64) = G_ANYEXT %4(s8)
    $x0 = COPY %5(s64)

...
---
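# Check that G_SMULO is lowered to G_MUL + G_SMULH: signed overflow occurred
# iff the high half differs from the low half's sign bits (G_ASHR by 63).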
name:            test_smul_overflow
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_smul_overflow
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]](s64)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]]
    ; CHECK: $x0 = COPY [[MUL]](s64)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64), %3:_(s1) = G_SMULO %0, %1
    $x0 = COPY %2(s64)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)

...
---
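# Check that G_UMULO is lowered to G_MUL + G_UMULH: unsigned overflow occurred
# iff the high half of the full product is nonzero.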
name:            test_umul_overflow
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
    ; CHECK: $x0 = COPY [[MUL]](s64)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64), %3:_(s1) = G_UMULO %0, %1
    $x0 = COPY %2(s64)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)

...
---
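# The same G_SMULO lowering at s32; the shift amount for the sign-replicating
# G_ASHR is still an s64 constant (31 here).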
name:            test_smul_overflow_s32
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_smul_overflow_s32
    ; CHECK: %lhs:_(s32) = COPY $w0
    ; CHECK: %rhs:_(s32) = COPY $w1
    ; CHECK: [[SMULH:%[0-9]+]]:_(s32) = G_SMULH %lhs, %rhs
    ; CHECK: %mul:_(s32) = G_MUL %lhs, %rhs
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR %mul, [[C]](s64)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]]
    ; CHECK: $w0 = COPY %mul(s32)
    ; CHECK: %ext_overflow:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: $w0 = COPY %ext_overflow(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %lhs:_(s32) = COPY $w0
    %rhs:_(s32) = COPY $w1
    %mul:_(s32), %overflow:_(s1) = G_SMULO %lhs, %rhs
    $w0 = COPY %mul(s32)
    %ext_overflow:_(s32) = G_ANYEXT %overflow(s1)
    $w0 = COPY %ext_overflow(s32)
    RET_ReallyLR implicit $w0

...
---
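# The same G_UMULO lowering at s32, comparing the high half against an s32 zero.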
name:            test_umul_overflow_s32
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow_s32
    ; CHECK: %lhs:_(s32) = COPY $w0
    ; CHECK: %rhs:_(s32) = COPY $w1
    ; CHECK: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH %lhs, %rhs
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: %mul:_(s32) = G_MUL %lhs, %rhs
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s32), [[C]]
    ; CHECK: $w0 = COPY %mul(s32)
    ; CHECK: %ext_overflow:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: $w0 = COPY %ext_overflow(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %lhs:_(s32) = COPY $w0
    %rhs:_(s32) = COPY $w1
    %mul:_(s32), %overflow:_(s1) = G_UMULO %lhs, %rhs
    $w0 = COPY %mul(s32)
    %ext_overflow:_(s32) = G_ANYEXT %overflow(s1)
    $w0 = COPY %ext_overflow(s32)
    RET_ReallyLR implicit $w0

...
---
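# s24 is not a legal type: the operands are masked to 24 bits (0xFFFFFF) and the
# multiply is widened to s32. A second compare catches products that fit in s32
# but not in s24, and the two overflow bits are OR'd together.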
name:            test_umul_overflow_s24
body:             |
  bb.0:
    ; CHECK-LABEL: name: test_umul_overflow_s24
    ; CHECK: %lhs_wide:_(s32) = COPY $w0
    ; CHECK: %rhs_wide:_(s32) = COPY $w1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %lhs_wide(s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %rhs_wide(s32)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[AND]], [[AND1]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s32), [[C1]]
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[MUL]](s32), [[AND2]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY3]]
    ; CHECK: %ext_mul:_(s32) = COPY [[MUL]](s32)
    ; CHECK: $w0 = COPY %ext_mul(s32)
    ; CHECK: %ext_overflow:_(s32) = COPY [[OR]](s32)
    ; CHECK: $w0 = COPY %ext_overflow(s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %lhs_wide:_(s32) = COPY $w0
    %rhs_wide:_(s32) = COPY $w1
    %lhs:_(s24) = G_TRUNC %lhs_wide
    %rhs:_(s24) = G_TRUNC %rhs_wide
    %mul:_(s24), %overflow:_(s1) = G_UMULO %lhs, %rhs
    %ext_mul:_(s32) = G_ANYEXT %mul
    $w0 = COPY %ext_mul(s32)
    %ext_overflow:_(s32) = G_ANYEXT %overflow(s1)
    $w0 = COPY %ext_overflow(s32)
    RET_ReallyLR implicit $w0

...
---
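# AArch64 has no vector multiply for 64-bit elements, so a <2 x s64> G_MUL is
# scalarized into two s64 G_MULs.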
name:            vector_mul_scalarize
body:             |
  bb.0:
    liveins: $q0, $q1

    ; CHECK-LABEL: name: vector_mul_scalarize
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UV]], [[UV2]]
    ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[UV1]], [[UV3]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL1]](s64)
    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<2 x s64>) = G_MUL %0, %1
    $q0 = COPY %2(<2 x s64>)
    RET_ReallyLR implicit $q0
...
---
name:            test_umulo_overflow_no_invalid_mir
alignment:       4
tracksRegLiveness: true
liveins:
  - { reg: '$x0' }
  - { reg: '$x1' }
  - { reg: '$x2' }
frameInfo:
  maxAlignment:    16
stack:
  - { id: 0, size: 8, alignment: 8 }
  - { id: 1, size: 8, alignment: 8 }
  - { id: 2, size: 16, alignment: 16 }
  - { id: 3, size: 16, alignment: 8 }
machineFunctionInfo: {}
body:             |
  bb.1:
    liveins: $x0, $x1, $x2

    ; Check that legalizing the overflow result doesn't produce invalid MIR by using
    ; the G_CONSTANT 0 before it has been defined.
    ; CHECK-LABEL: name: test_umulo_overflow_no_invalid_mir
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
    ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
    ; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3
    ; CHECK: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
    ; CHECK: G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64))
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s64))
    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[LOAD1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[LOAD1]]
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
    ; CHECK: G_STORE [[C]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 1)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
    ; CHECK: $x0 = COPY [[MUL]](s64)
    ; CHECK: $x1 = COPY [[AND]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
    %0:_(p0) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s64) = COPY $x2
    %25:_(s32) = G_CONSTANT i32 0
    %3:_(p0) = G_FRAME_INDEX %stack.0
    %4:_(p0) = G_FRAME_INDEX %stack.1
    %6:_(p0) = G_FRAME_INDEX %stack.3
    G_STORE %2(s64), %3(p0) :: (store (s64))
    G_STORE %1(s64), %4(p0) :: (store (s64))
    %7:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64))
    %8:_(s64) = G_LOAD %4(p0) :: (dereferenceable load (s64))
    %9:_(s64), %10:_(s1) = G_UMULO %7, %8
    %31:_(s64) = G_CONSTANT i64 0
    G_STORE %31(s64), %6(p0) :: (store (s64), align 1)
    %16:_(s64) = G_ZEXT %10(s1)
    $x0 = COPY %9(s64)
    $x1 = COPY %16(s64)
    RET_ReallyLR implicit $x0

...