; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
; logic shift reg pattern: and
; already optimized by another pattern

define i64 @and_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: and_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x8, x1, x0, asr #23
; CHECK-NEXT:    and x0, x8, #0xffffffffff000000
; CHECK-NEXT:    ret
  %ashr = ashr i64 %a, 23
  %and = and i64 %ashr, -16777216
  %r = and i64 %b, %and
  ret i64 %r
}
; TODO: logic shift reg pattern: bic

define i64 @bic_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: bic_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16777215 // =0xffffff
; CHECK-NEXT:    orn x8, x8, x0, asr #23
; CHECK-NEXT:    and x0, x1, x8
; CHECK-NEXT:    ret
  %ashr = ashr i64 %a, 23
  %and = and i64 %ashr, -16777216
  %not = xor i64 %and, -1
  %r = and i64 %b, %not
  ret i64 %r
}
; logic shift reg pattern: eon

define i64 @eon_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: eon_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #17
; CHECK-NEXT:    eon x0, x1, x8, lsl #53
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -9007199254740992
  %xor = xor i64 %and, -1
  %r = xor i64 %xor, %b
  ret i64 %r
}
; logic shift reg pattern: eor

define i64 @eor_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: eor_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #47
; CHECK-NEXT:    eor x0, x1, x8, lsl #24
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 2199006478336
  %or = xor i64 %and, %b
  ret i64 %or
}
; logic shift reg pattern: mvn
; already optimized by another pattern

define i64 @mvn_shiftedreg_from_and(i64 %a) {
; CHECK-LABEL: mvn_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9007199254740991 // =0x1fffffffffffff
; CHECK-NEXT:    orn x0, x8, x0, lsl #36
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -9007199254740992
  %xor = xor i64 %and, -1
  ret i64 %xor
}
; logic shift reg pattern: orn
; already optimized by another pattern

define i64 @orn_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: orn_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orn x8, x1, x0, lsr #23
; CHECK-NEXT:    orr x0, x8, #0xfffffe0000ffffff
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 2199006478336
  %not = xor i64 %and, -1
  %or = or i64 %not, %b
  ret i64 %or
}
; logic shift reg pattern: orr
; srl constant bitwidth == (lowbits + masklen + shiftamt)
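; here the mask 0x1ffff000000 gives lowbits = 24, masklen = 17 and shiftamt = 23,
; so 24 + 17 + 23 == 64 and the and folds into lsr #47 plus a shifted-register orr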
define i64 @orr_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: orr_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #47
; CHECK-NEXT:    orr x0, x1, x8, lsl #24
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 2199006478336 ; 0x1ffff000000
  %or = or i64 %and, %b
  ret i64 %or
}
; logic shift reg pattern: orr
; srl constant bitwidth < (lowbits + masklen + shiftamt)
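; with mask 0x3ffff000000 the sum is 24 + 18 + 23 = 65 > 64, but the lshr by 23 leaves
; only 41 significant bits, so the extra mask bit is dead and the same fold still applies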
define i64 @orr_shiftedreg_from_and_mask2(i64 %a, i64 %b) {
; CHECK-LABEL: orr_shiftedreg_from_and_mask2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #47
; CHECK-NEXT:    orr x0, x1, x8, lsl #24
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 4398029733888 ; 0x3ffff000000
  %or = or i64 %and, %b
  ret i64 %or
}
; arithmetic shift reg pattern: add

define i32 @add_shiftedreg_from_and(i32 %a, i32 %b) {
; CHECK-LABEL: add_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #27
; CHECK-NEXT:    add w0, w1, w8, lsl #24
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 3
  %and = and i32 %ashr, -16777216
  %add = add i32 %and, %b
  ret i32 %add
}
; arithmetic shift reg pattern: sub

define i64 @sub_shiftedreg_from_and_shl(i64 %a, i64 %b) {
; CHECK-LABEL: sub_shiftedreg_from_and_shl:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #17
; CHECK-NEXT:    sub x0, x1, x8, lsl #53
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -9007199254740992
  %sub = sub i64 %b, %and
  ret i64 %sub
}
; negative test: type is not i32 or i64

define <2 x i32> @shiftedreg_from_and_negative_type(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_type:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shl v0.2s, v0.2s, #2
; CHECK-NEXT:    bic v0.2s, #28
; CHECK-NEXT:    sub v0.2s, v1.2s, v0.2s
; CHECK-NEXT:    ret
  %shl = shl <2 x i32> %a, <i32 2, i32 2>
  %and = and <2 x i32> %shl, <i32 -32, i32 -32>
  %sub = sub <2 x i32> %b, %and
  ret <2 x i32> %sub
}
; negative test: shift one-use

define i32 @shiftedreg_from_and_negative_oneuse1(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_oneuse1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #23
; CHECK-NEXT:    and w9, w8, #0xff000000
; CHECK-NEXT:    add w9, w9, w1
; CHECK-NEXT:    mul w0, w8, w9
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -16777216
  %add = add i32 %and, %b
  %r = mul i32 %ashr, %add
  ret i32 %r
}
; negative test: and one-use

define i32 @shiftedreg_from_and_negative_oneuse2(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_oneuse2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #23
; CHECK-NEXT:    and w8, w8, #0xff000000
; CHECK-NEXT:    add w9, w8, w1
; CHECK-NEXT:    mul w0, w8, w9
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -16777216
  %add = add i32 %and, %b
  %r = mul i32 %and, %add
  ret i32 %r
}
; negative test: and c is not mask
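; (-1677721 = 0xffe66667, which is not a contiguous run of set bits)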
define i32 @shiftedreg_from_and_negative_andc1(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #26215 // =0x6667
; CHECK-NEXT:    movk w8, #65510, lsl #16
; CHECK-NEXT:    and w8, w8, w0, asr #23
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -1677721
  %add = add i32 %and, %b
  ret i32 %add
}
; negative test: sra with and c is not legal mask
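; (0xef000000 has a hole at bit 28, so it is not a shifted mask)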
define i32 @shiftedreg_from_and_negative_andc2(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-285212672 // =0xef000000
; CHECK-NEXT:    and w8, w8, w0, asr #23
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, 4009754624 ; 0xef000000
  %add = add i32 %and, %b
  ret i32 %add
}
; negative test: shl with and c is not legal mask
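; (the shl by 36 already clears bits 35:0, so the and with 0xffffffff00000000 is
; redundant and only a plain shifted-register eor is emitted)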
define i64 @shiftedreg_from_and_negative_andc3(i64 %a, i64 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor x0, x1, x0, lsl #36
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -4294967296
  %xor = xor i64 %and, %b
  ret i64 %xor
}
; negative test: shl with and c is not legal mask
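; (0x7fe0000000000000 covers bits 62:53 but not bit 63, so the and cannot be folded
; into the shifted-register eor)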
define i64 @shiftedreg_from_and_negative_andc4(i64 %a, i64 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #36
; CHECK-NEXT:    and x8, x8, #0x7fe0000000000000
; CHECK-NEXT:    eor x0, x8, x1
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, 9214364837600034816
  %xor = xor i64 %and, %b
  ret i64 %xor
}
; negative test: sra with and c is not legal mask
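; (the fold would need asr #47, i.e. 23 + 24 low zero bits, which is out of range for
; a 32-bit shift, so shift and mask stay separate)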
define i32 @shiftedreg_from_and_negative_andc5(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #23
; CHECK-NEXT:    and w8, w8, #0xff000000
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -16777216
  %add = add i32 %and, %b
  ret i32 %add
}
; negative test: srl with and c is not legal mask
; srl constant bitwidth > (lowbits + masklen + shiftamt)
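; here lowbits = 1, masklen = 2, shiftamt = 2 and 1 + 2 + 2 = 5 < 64, so the lshr still
; leaves high bits that the mask must clear and the and cannot be dropped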
define i64 @shiftedreg_from_and_negative_andc6(i64 %a, i64 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc6:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #2
; CHECK-NEXT:    and x8, x8, #0x6
; CHECK-NEXT:    add x0, x8, x1
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 2
  %and = and i64 %lshr, 6
  %add = add i64 %and, %b
  ret i64 %add
}