1 ; RUN: llc -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
2 ; RUN: llc -O3 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefix=O3
4 ; This file checks that the translation from LLVM IR to generic MachineInstr in the IRTranslator is correct.
6 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
7 target triple = "aarch64--"
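; Tests for simple integer arithmetic: add/mul on i64 should translate to G_ADD/G_MUL on s64 virtual registers.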
10 ; CHECK-LABEL: name: addi64
11 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
12 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
13 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_ADD [[ARG1]], [[ARG2]]
14 ; CHECK-NEXT: $x0 = COPY [[RES]]
15 ; CHECK-NEXT: RET_ReallyLR implicit $x0
16 define i64 @addi64(i64 %arg1, i64 %arg2) {
17 %res = add i64 %arg1, %arg2
21 ; CHECK-LABEL: name: muli64
22 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
23 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
24 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_MUL [[ARG1]], [[ARG2]]
25 ; CHECK-NEXT: $x0 = COPY [[RES]]
26 ; CHECK-NEXT: RET_ReallyLR implicit $x0
27 define i64 @muli64(i64 %arg1, i64 %arg2) {
28 %res = mul i64 %arg1, %arg2
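; Tests for alloca: static allocas become frame indices with the expected size and alignment.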
33 ; CHECK-LABEL: name: allocai64
35 ; CHECK-NEXT: - { id: 0, name: ptr1, type: default, offset: 0, size: 8, alignment: 8,
36 ; CHECK-NEXT: stack-id: default, callee-saved-register: '', callee-saved-restored: true,
37 ; CHECK-NEXT: debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
38 ; CHECK-NEXT: - { id: 1, name: ptr2, type: default, offset: 0, size: 8, alignment: 1,
39 ; CHECK-NEXT: stack-id: default, callee-saved-register: '', callee-saved-restored: true,
40 ; CHECK-NEXT: debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
41 ; CHECK-NEXT: - { id: 2, name: ptr3, type: default, offset: 0, size: 128, alignment: 8,
42 ; CHECK-NEXT: stack-id: default, callee-saved-register: '', callee-saved-restored: true,
43 ; CHECK-NEXT: debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
44 ; CHECK-NEXT: - { id: 3, name: ptr4, type: default, offset: 0, size: 1, alignment: 8,
45 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.0.ptr1
46 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.1.ptr2
47 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.2.ptr3
48 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.3.ptr4
49 define void @allocai64() {
51 %ptr2 = alloca i64, align 1
52 %ptr3 = alloca i64, i32 16
53 %ptr4 = alloca [0 x i64]
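; Tests for unconditional br.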
58 ; CHECK-LABEL: name: uncondbr
61 ; ABI/constant lowering and IR-level entry basic block.
62 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
64 ; Make sure we have one and only one successor.
65 ; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+]](0x80000000)
67 ; Check that we emit the correct branch.
68 ; CHECK: G_BR %[[BB2]]
70 ; Check that the end block contains the return instruction.
71 ; CHECK: [[END:bb.[0-9]+]].{{[a-zA-Z0-9.]+}}:
72 ; CHECK-NEXT: RET_ReallyLR
74 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
75 ; CHECK-NEXT: successors: %[[END]](0x80000000)
76 ; CHECK: G_BR %[[END]]
77 define void @uncondbr() {
86 ; CHECK-LABEL: name: uncondbr_fallthrough
88 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
89 ; CHECK-NEXT: successors: %[[END:bb.[0-9]+]](0x80000000)
90 ; We don't emit a branch here, as we can fall through to the successor.
92 ; CHECK: [[END]].{{[a-zA-Z0-9.]+}}:
93 ; CHECK-NEXT: RET_ReallyLR
94 define void @uncondbr_fallthrough() {
101 ; Tests for conditional br.
102 ; CHECK-LABEL: name: condbr
105 ; ABI/constant lowering and IR-level entry basic block.
106 ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
107 ; Make sure we have two successors.
108 ; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000),
109 ; CHECK: %[[FALSE:bb.[0-9]+]](0x40000000)
111 ; CHECK: [[ADDR:%.*]]:_(p0) = COPY $x0
113 ; Check that we emit the correct branch.
114 ; CHECK: [[TST:%.*]]:_(s1) = G_LOAD [[ADDR]](p0)
115 ; CHECK: G_BRCOND [[TST]](s1), %[[TRUE]]
116 ; CHECK: G_BR %[[FALSE]]
118 ; Check that each successor contains the return instruction.
119 ; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}:
120 ; CHECK-NEXT: RET_ReallyLR
121 ; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
122 ; CHECK-NEXT: RET_ReallyLR
123 define void @condbr(i1* %tstaddr) {
124 %tst = load i1, i1* %tstaddr
125 br i1 %tst, label %true, label %false
132 ; Tests for indirect br.
133 ; CHECK-LABEL: name: indirectbr
136 ; ABI/constant lowering and IR-level entry basic block.
137 ; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}:
138 ; Make sure we have one successor.
139 ; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+]](0x80000000)
142 ; Check that basic block L1 has two successors: L1 (itself) and L2.
143 ; CHECK: [[BB_L1]].{{[a-zA-Z0-9.]+}} (address-taken):
144 ; CHECK-NEXT: successors: %[[BB_L1]](0x40000000),
145 ; CHECK: %[[BB_L2:bb.[0-9]+]](0x40000000)
146 ; CHECK: G_BRINDIRECT %{{[0-9]+}}(p0)
148 ; Check that basic block L2 is the return basic block.
149 ; CHECK: [[BB_L2]].{{[a-zA-Z0-9.]+}} (address-taken):
150 ; CHECK-NEXT: RET_ReallyLR
152 @indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8
154 define void @indirectbr() {
157 L1: ; preds = %entry, %L1
158 %i = phi i32 [ 0, %entry ], [ %inc, %L1 ]
160 %idxprom = zext i32 %i to i64
161 %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @indirectbr.L, i64 0, i64 %idxprom
162 %brtarget = load i8*, i8** %arrayidx, align 8
163 indirectbr i8* %brtarget, [label %L1, label %L2]
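; Tests for the remaining integer binary operations (or, xor, and, sub) at both 64 and 32 bits.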
169 ; CHECK-LABEL: name: ori64
170 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
171 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
172 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_OR [[ARG1]], [[ARG2]]
173 ; CHECK-NEXT: $x0 = COPY [[RES]]
174 ; CHECK-NEXT: RET_ReallyLR implicit $x0
175 define i64 @ori64(i64 %arg1, i64 %arg2) {
176 %res = or i64 %arg1, %arg2
180 ; CHECK-LABEL: name: ori32
181 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
182 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
183 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_OR [[ARG1]], [[ARG2]]
184 ; CHECK-NEXT: $w0 = COPY [[RES]]
185 ; CHECK-NEXT: RET_ReallyLR implicit $w0
186 define i32 @ori32(i32 %arg1, i32 %arg2) {
187 %res = or i32 %arg1, %arg2
192 ; CHECK-LABEL: name: xori64
193 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
194 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
195 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_XOR [[ARG1]], [[ARG2]]
196 ; CHECK-NEXT: $x0 = COPY [[RES]]
197 ; CHECK-NEXT: RET_ReallyLR implicit $x0
198 define i64 @xori64(i64 %arg1, i64 %arg2) {
199 %res = xor i64 %arg1, %arg2
203 ; CHECK-LABEL: name: xori32
204 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
205 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
206 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_XOR [[ARG1]], [[ARG2]]
207 ; CHECK-NEXT: $w0 = COPY [[RES]]
208 ; CHECK-NEXT: RET_ReallyLR implicit $w0
209 define i32 @xori32(i32 %arg1, i32 %arg2) {
210 %res = xor i32 %arg1, %arg2
215 ; CHECK-LABEL: name: andi64
216 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
217 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
218 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_AND [[ARG1]], [[ARG2]]
219 ; CHECK-NEXT: $x0 = COPY [[RES]]
220 ; CHECK-NEXT: RET_ReallyLR implicit $x0
221 define i64 @andi64(i64 %arg1, i64 %arg2) {
222 %res = and i64 %arg1, %arg2
226 ; CHECK-LABEL: name: andi32
227 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
228 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
229 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_AND [[ARG1]], [[ARG2]]
230 ; CHECK-NEXT: $w0 = COPY [[RES]]
231 ; CHECK-NEXT: RET_ReallyLR implicit $w0
232 define i32 @andi32(i32 %arg1, i32 %arg2) {
233 %res = and i32 %arg1, %arg2
238 ; CHECK-LABEL: name: subi64
239 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
240 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
241 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_SUB [[ARG1]], [[ARG2]]
242 ; CHECK-NEXT: $x0 = COPY [[RES]]
243 ; CHECK-NEXT: RET_ReallyLR implicit $x0
244 define i64 @subi64(i64 %arg1, i64 %arg2) {
245 %res = sub i64 %arg1, %arg2
249 ; CHECK-LABEL: name: subi32
250 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
251 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
252 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SUB [[ARG1]], [[ARG2]]
253 ; CHECK-NEXT: $w0 = COPY [[RES]]
254 ; CHECK-NEXT: RET_ReallyLR implicit $w0
255 define i32 @subi32(i32 %arg1, i32 %arg2) {
256 %res = sub i32 %arg1, %arg2
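; Tests for casts: ptrtoint, inttoptr, bitcast, addrspacecast and trunc.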
260 ; CHECK-LABEL: name: ptrtoint
261 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
262 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[ARG1]]
263 ; CHECK: $x0 = COPY [[RES]]
264 ; CHECK: RET_ReallyLR implicit $x0
265 define i64 @ptrtoint(i64* %a) {
266 %val = ptrtoint i64* %a to i64
270 ; CHECK-LABEL: name: inttoptr
271 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
272 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_INTTOPTR [[ARG1]]
273 ; CHECK: $x0 = COPY [[RES]]
274 ; CHECK: RET_ReallyLR implicit $x0
275 define i64* @inttoptr(i64 %a) {
276 %val = inttoptr i64 %a to i64*
280 ; CHECK-LABEL: name: trivial_bitcast
281 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
282 ; CHECK: $x0 = COPY [[ARG1]]
283 ; CHECK: RET_ReallyLR implicit $x0
284 define i64* @trivial_bitcast(i8* %a) {
285 %val = bitcast i8* %a to i64*
289 ; CHECK-LABEL: name: trivial_bitcast_with_copy
290 ; CHECK: [[A:%[0-9]+]]:_(p0) = COPY $x0
291 ; CHECK: G_BR %[[CAST:bb\.[0-9]+]]
293 ; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}:
294 ; CHECK: $x0 = COPY [[A]]
296 ; CHECK: [[CAST]].{{[a-zA-Z0-9.]+}}:
297 ; CHECK: G_BR %[[END]]
298 define i64* @trivial_bitcast_with_copy(i8* %a) {
305 %val = bitcast i8* %a to i64*
309 ; CHECK-LABEL: name: bitcast
310 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
311 ; CHECK: [[RES1:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[ARG1]]
312 ; CHECK: [[RES2:%[0-9]+]]:_(s64) = G_BITCAST [[RES1]]
313 ; CHECK: $x0 = COPY [[RES2]]
314 ; CHECK: RET_ReallyLR implicit $x0
315 define i64 @bitcast(i64 %a) {
316 %res1 = bitcast i64 %a to <2 x i32>
317 %res2 = bitcast <2 x i32> %res1 to i64
321 ; CHECK-LABEL: name: addrspacecast
322 ; CHECK: [[ARG1:%[0-9]+]]:_(p1) = COPY $x0
323 ; CHECK: [[RES1:%[0-9]+]]:_(p2) = G_ADDRSPACE_CAST [[ARG1]]
324 ; CHECK: [[RES2:%[0-9]+]]:_(p0) = G_ADDRSPACE_CAST [[RES1]]
325 ; CHECK: $x0 = COPY [[RES2]]
326 ; CHECK: RET_ReallyLR implicit $x0
327 define i64* @addrspacecast(i32 addrspace(1)* %a) {
328 %res1 = addrspacecast i32 addrspace(1)* %a to i64 addrspace(2)*
329 %res2 = addrspacecast i64 addrspace(2)* %res1 to i64*
333 ; CHECK-LABEL: name: trunc
334 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
335 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_LOAD
336 ; CHECK: [[RES1:%[0-9]+]]:_(s8) = G_TRUNC [[ARG1]]
337 ; CHECK: [[RES2:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[VEC]]
338 define void @trunc(i64 %a) {
339 %vecptr = alloca <4 x i32>
340 %vec = load <4 x i32>, <4 x i32>* %vecptr
341 %res1 = trunc i64 %a to i8
342 %res2 = trunc <4 x i32> %vec to <4 x i16>
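; Tests for load and store: alignment, address space, volatility and !range metadata should be carried over into the memory operands.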
346 ; CHECK-LABEL: name: load
347 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
348 ; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
349 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 16)
350 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42, addrspace 42)
351 ; CHECK: [[SUM2:%.*]]:_(s64) = G_ADD [[VAL1]], [[VAL2]]
352 ; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load 8 from %ir.addr)
353 ; CHECK: [[SUM3:%[0-9]+]]:_(s64) = G_ADD [[SUM2]], [[VAL3]]
354 ; CHECK: [[VAL4:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, !range !0)
355 ; CHECK: [[SUM4:%[0-9]+]]:_(s64) = G_ADD [[SUM3]], [[VAL4]]
356 ; CHECK: $x0 = COPY [[SUM4]]
357 ; CHECK: RET_ReallyLR implicit $x0
358 define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
359 %val1 = load i64, i64* %addr, align 16
361 %val2 = load i64, i64 addrspace(42)* %addr42
362 %sum2 = add i64 %val1, %val2
364 %val3 = load volatile i64, i64* %addr
365 %sum3 = add i64 %sum2, %val3
367 %val4 = load i64, i64* %addr, !range !0
368 %sum4 = add i64 %sum3, %val4
372 ; CHECK-LABEL: name: store
373 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
374 ; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
375 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY $x2
376 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY $x3
377 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 16)
378 ; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42, addrspace 42)
379 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store 8 into %ir.addr)
380 ; CHECK: RET_ReallyLR
381 define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
382 store i64 %val1, i64* %addr, align 16
383 store i64 %val2, i64 addrspace(42)* %addr42
384 store volatile i64 %val1, i64* %addr
385 %sum = add i64 %val1, %val2
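; Tests for intrinsic calls: llvm.returnaddress becomes G_INTRINSIC, while the side-effecting neon.st2 becomes G_INTRINSIC_W_SIDE_EFFECTS.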
389 ; CHECK-LABEL: name: intrinsics
390 ; CHECK: [[CUR:%[0-9]+]]:_(s32) = COPY $w0
391 ; CHECK: [[BITS:%[0-9]+]]:_(s32) = COPY $w1
392 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), 0
393 ; CHECK: [[PTR_VEC:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.ptr.vec
394 ; CHECK: [[VEC:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[PTR_VEC]]
395 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.st2), [[VEC]](<8 x s8>), [[VEC]](<8 x s8>), [[PTR]](p0)
396 ; CHECK: RET_ReallyLR
397 declare i8* @llvm.returnaddress(i32)
398 declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
399 declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
400 define void @intrinsics(i32 %cur, i32 %bits) {
401 %ptr = call i8* @llvm.returnaddress(i32 0)
402 %ptr.vec = alloca <8 x i8>
403 %vec = load <8 x i8>, <8 x i8>* %ptr.vec
404 call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec, <8 x i8> %vec, i8* %ptr)
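; Tests for phi.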
408 ; CHECK-LABEL: name: test_phi
409 ; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+]]
410 ; CHECK: G_BR %[[FALSE:bb\.[0-9]+]]
412 ; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}:
413 ; CHECK: [[RES1:%[0-9]+]]:_(s32) = G_LOAD
415 ; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
416 ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_LOAD
418 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]]
419 ; CHECK: $w0 = COPY [[RES]]
420 define i32 @test_phi(i32* %addr1, i32* %addr2, i1 %tst) {
421 br i1 %tst, label %true, label %false
424 %res1 = load i32, i32* %addr1
428 %res2 = load i32, i32* %addr2
432 %res = phi i32 [%res1, %true], [%res2, %false]
436 ; CHECK-LABEL: name: unreachable
440 define void @unreachable(i32 %a) {
441 %sum = add i32 %a, %a
445 ; It's important that constants are emitted after argument passing, but before the
446 ; rest of the entry block.
447 ; CHECK-LABEL: name: constant_int
448 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
449 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
451 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
452 ; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
453 ; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
454 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]]
455 ; CHECK: $w0 = COPY [[RES]]
457 define i32 @constant_int(i32 %in) {
461 %sum1 = add i32 %in, 1
462 %sum2 = add i32 %in, 1
463 %res = add i32 %sum1, %sum2
467 ; CHECK-LABEL: name: constant_int_start
468 ; CHECK: [[TWO:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
469 ; CHECK: [[ANSWER:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
470 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
471 define i32 @constant_int_start() {
476 ; CHECK-LABEL: name: test_undef
477 ; CHECK: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
478 ; CHECK: $w0 = COPY [[UNDEF]]
479 define i32 @test_undef() {
483 ; CHECK-LABEL: name: test_constant_inttoptr
484 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
485 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ONE]]
486 ; CHECK: $x0 = COPY [[PTR]]
487 define i8* @test_constant_inttoptr() {
488 ret i8* inttoptr(i64 1 to i8*)
491 ; This failed purely because the Constant -> VReg map was kept across
492 ; functions, so it would reuse the "i64 1" from above.
493 ; CHECK-LABEL: name: test_reused_constant
494 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
495 ; CHECK: $x0 = COPY [[ONE]]
496 define i64 @test_reused_constant() {
500 ; CHECK-LABEL: name: test_sext
501 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
502 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_SEXT [[IN]]
503 ; CHECK: $x0 = COPY [[RES]]
504 define i64 @test_sext(i32 %in) {
505 %res = sext i32 %in to i64
509 ; CHECK-LABEL: name: test_zext
510 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
511 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ZEXT [[IN]]
512 ; CHECK: $x0 = COPY [[RES]]
513 define i64 @test_zext(i32 %in) {
514 %res = zext i32 %in to i64
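; Tests for shifts and division/remainder: shl/lshr/ashr and sdiv/udiv/srem/urem map to their generic opcodes.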
518 ; CHECK-LABEL: name: test_shl
519 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
520 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
521 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SHL [[ARG1]], [[ARG2]]
522 ; CHECK-NEXT: $w0 = COPY [[RES]]
523 ; CHECK-NEXT: RET_ReallyLR implicit $w0
524 define i32 @test_shl(i32 %arg1, i32 %arg2) {
525 %res = shl i32 %arg1, %arg2
530 ; CHECK-LABEL: name: test_lshr
531 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
532 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
533 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_LSHR [[ARG1]], [[ARG2]]
534 ; CHECK-NEXT: $w0 = COPY [[RES]]
535 ; CHECK-NEXT: RET_ReallyLR implicit $w0
536 define i32 @test_lshr(i32 %arg1, i32 %arg2) {
537 %res = lshr i32 %arg1, %arg2
541 ; CHECK-LABEL: name: test_ashr
542 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
543 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
544 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_ASHR [[ARG1]], [[ARG2]]
545 ; CHECK-NEXT: $w0 = COPY [[RES]]
546 ; CHECK-NEXT: RET_ReallyLR implicit $w0
547 define i32 @test_ashr(i32 %arg1, i32 %arg2) {
548 %res = ashr i32 %arg1, %arg2
552 ; CHECK-LABEL: name: test_sdiv
553 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
554 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
555 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SDIV [[ARG1]], [[ARG2]]
556 ; CHECK-NEXT: $w0 = COPY [[RES]]
557 ; CHECK-NEXT: RET_ReallyLR implicit $w0
558 define i32 @test_sdiv(i32 %arg1, i32 %arg2) {
559 %res = sdiv i32 %arg1, %arg2
563 ; CHECK-LABEL: name: test_udiv
564 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
565 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
566 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UDIV [[ARG1]], [[ARG2]]
567 ; CHECK-NEXT: $w0 = COPY [[RES]]
568 ; CHECK-NEXT: RET_ReallyLR implicit $w0
569 define i32 @test_udiv(i32 %arg1, i32 %arg2) {
570 %res = udiv i32 %arg1, %arg2
574 ; CHECK-LABEL: name: test_srem
575 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
576 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
577 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SREM [[ARG1]], [[ARG2]]
578 ; CHECK-NEXT: $w0 = COPY [[RES]]
579 ; CHECK-NEXT: RET_ReallyLR implicit $w0
580 define i32 @test_srem(i32 %arg1, i32 %arg2) {
581 %res = srem i32 %arg1, %arg2
585 ; CHECK-LABEL: name: test_urem
586 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
587 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
588 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UREM [[ARG1]], [[ARG2]]
589 ; CHECK-NEXT: $w0 = COPY [[RES]]
590 ; CHECK-NEXT: RET_ReallyLR implicit $w0
591 define i32 @test_urem(i32 %arg1, i32 %arg2) {
592 %res = urem i32 %arg1, %arg2
596 ; CHECK-LABEL: name: test_constant_null
597 ; CHECK: [[ZERO:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
598 ; CHECK: [[NULL:%[0-9]+]]:_(p0) = G_INTTOPTR [[ZERO]]
599 ; CHECK: $x0 = COPY [[NULL]]
600 define i8* @test_constant_null() {
604 ; CHECK-LABEL: name: test_struct_memops
605 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
606 ; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
607 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
608 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST1]](s64)
609 ; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.addr + 4)
610 ; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store 1 into %ir.addr, align 4)
611 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST1]](s64)
612 ; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store 4 into %ir.addr + 4)
613 define void @test_struct_memops({ i8, i32 }* %addr) {
614 %val = load { i8, i32 }, { i8, i32 }* %addr
615 store { i8, i32 } %val, { i8, i32 }* %addr
619 ; CHECK-LABEL: name: test_i1_memops
620 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
621 ; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load 1 from %ir.addr)
622 ; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store 1 into %ir.addr)
623 define void @test_i1_memops(i1* %addr) {
624 %val = load i1, i1* %addr
625 store i1 %val, i1* %addr
629 ; CHECK-LABEL: name: int_comparison
630 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
631 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
632 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
633 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LHS]](s32), [[RHS]]
634 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
635 define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
636 %res = icmp ne i32 %a, %b
637 store i1 %res, i1* %addr
641 ; CHECK-LABEL: name: ptr_comparison
642 ; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x0
643 ; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x1
644 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
645 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LHS]](p0), [[RHS]]
646 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
647 define void @ptr_comparison(i8* %a, i8* %b, i1* %addr) {
648 %res = icmp eq i8* %a, %b
649 store i1 %res, i1* %addr
653 ; CHECK-LABEL: name: test_fadd
654 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
655 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
656 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FADD [[ARG1]], [[ARG2]]
657 ; CHECK-NEXT: $s0 = COPY [[RES]]
658 ; CHECK-NEXT: RET_ReallyLR implicit $s0
659 define float @test_fadd(float %arg1, float %arg2) {
660 %res = fadd float %arg1, %arg2
664 ; CHECK-LABEL: name: test_fsub
665 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
666 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
667 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FSUB [[ARG1]], [[ARG2]]
668 ; CHECK-NEXT: $s0 = COPY [[RES]]
669 ; CHECK-NEXT: RET_ReallyLR implicit $s0
670 define float @test_fsub(float %arg1, float %arg2) {
671 %res = fsub float %arg1, %arg2
675 ; CHECK-LABEL: name: test_fmul
676 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
677 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
678 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FMUL [[ARG1]], [[ARG2]]
679 ; CHECK-NEXT: $s0 = COPY [[RES]]
680 ; CHECK-NEXT: RET_ReallyLR implicit $s0
681 define float @test_fmul(float %arg1, float %arg2) {
682 %res = fmul float %arg1, %arg2
686 ; CHECK-LABEL: name: test_fdiv
687 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
688 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
689 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FDIV [[ARG1]], [[ARG2]]
690 ; CHECK-NEXT: $s0 = COPY [[RES]]
691 ; CHECK-NEXT: RET_ReallyLR implicit $s0
692 define float @test_fdiv(float %arg1, float %arg2) {
693 %res = fdiv float %arg1, %arg2
697 ; CHECK-LABEL: name: test_frem
698 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
699 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
700 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FREM [[ARG1]], [[ARG2]]
701 ; CHECK-NEXT: $s0 = COPY [[RES]]
702 ; CHECK-NEXT: RET_ReallyLR implicit $s0
703 define float @test_frem(float %arg1, float %arg2) {
704 %res = frem float %arg1, %arg2
708 ; CHECK-LABEL: name: test_fneg
709 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
710 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FNEG [[ARG1]]
711 ; CHECK-NEXT: $s0 = COPY [[RES]]
712 ; CHECK-NEXT: RET_ReallyLR implicit $s0
713 define float @test_fneg(float %arg1) {
714 %res = fneg float %arg1
718 ; CHECK-LABEL: name: test_fneg_fmf
719 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
720 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FNEG [[ARG1]]
721 ; CHECK-NEXT: $s0 = COPY [[RES]]
722 ; CHECK-NEXT: RET_ReallyLR implicit $s0
723 define float @test_fneg_fmf(float %arg1) {
724 %res = fneg fast float %arg1
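; Tests for the *.with.overflow intrinsics: the result value and the overflow flag are stored to the two struct fields.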
728 ; CHECK-LABEL: name: test_sadd_overflow
729 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
730 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
731 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
732 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
733 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
734 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
735 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
736 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
737 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
738 define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
739 %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
740 store { i32, i1 } %res, { i32, i1 }* %addr
744 ; CHECK-LABEL: name: test_uadd_overflow
745 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
746 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
747 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
748 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDO [[LHS]], [[RHS]]
749 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
750 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
751 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
752 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
753 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
754 define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
755 %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
756 store { i32, i1 } %res, { i32, i1 }* %addr
760 ; CHECK-LABEL: name: test_ssub_overflow
761 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
762 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
763 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
764 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
765 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.subr)
766 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
767 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
768 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.subr + 4, align 4)
769 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
770 define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
771 %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %lhs, i32 %rhs)
772 store { i32, i1 } %res, { i32, i1 }* %subr
776 ; CHECK-LABEL: name: test_usub_overflow
777 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
778 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
779 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
780 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBO [[LHS]], [[RHS]]
781 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.subr)
782 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
783 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
784 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.subr + 4, align 4)
785 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
786 define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
787 %res = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %lhs, i32 %rhs)
788 store { i32, i1 } %res, { i32, i1 }* %subr
792 ; CHECK-LABEL: name: test_smul_overflow
793 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
794 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
795 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
796 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
797 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
798 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
799 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
800 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
801 declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
802 define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
803 %res = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %lhs, i32 %rhs)
804 store { i32, i1 } %res, { i32, i1 }* %addr
808 ; CHECK-LABEL: name: test_umul_overflow
809 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
810 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
811 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
812 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
813 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
814 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
815 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST]](s64)
816 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
817 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
818 define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
819 %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
820 store { i32, i1 } %res, { i32, i1 }* %addr
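; Tests for extractvalue/insertvalue: aggregate loads and stores are split into per-field accesses at the right byte offsets.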
824 ; CHECK-LABEL: name: test_extractvalue
825 ; CHECK: %0:_(p0) = COPY $x0
826 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
827 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
828 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
829 ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
830 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
831 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
832 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
833 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
834 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
835 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
836 ; CHECK: $w0 = COPY [[LD3]](s32)
837 %struct.nested = type {i8, { i8, i32 }, i32}
838 define i32 @test_extractvalue(%struct.nested* %addr) {
839 %struct = load %struct.nested, %struct.nested* %addr
840 %res = extractvalue %struct.nested %struct, 1, 1
844 ; CHECK-LABEL: name: test_extractvalue_agg
845 ; CHECK: %0:_(p0) = COPY $x0
846 ; CHECK: %1:_(p0) = COPY $x1
847 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
848 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
849 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
850 ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
851 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
852 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
853 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
854 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
855 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
856 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
857 ; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store 1 into %ir.addr2, align 4)
858 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP %1, [[CST1]](s64)
859 ; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store 4 into %ir.addr2 + 4)
860 define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
861 %struct = load %struct.nested, %struct.nested* %addr
862 %res = extractvalue %struct.nested %struct, 1
863 store {i8, i32} %res, {i8, i32}* %addr2
867 ; CHECK-LABEL: name: test_trivial_extract_ptr
868 ; CHECK: [[STRUCT:%[0-9]+]]:_(p0) = COPY $x0
869 ; CHECK: [[VAL32:%[0-9]+]]:_(s32) = COPY $w1
870 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_TRUNC [[VAL32]]
871 ; CHECK: G_STORE [[VAL]](s8), [[STRUCT]](p0)
872 define void @test_trivial_extract_ptr([1 x i8*] %s, i8 %val) {
873 %addr = extractvalue [1 x i8*] %s, 0
874 store i8 %val, i8* %addr
878 ; CHECK-LABEL: name: test_insertvalue
879 ; CHECK: %0:_(p0) = COPY $x0
880 ; CHECK: %1:_(s32) = COPY $w1
881 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
882 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
883 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
884 ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
885 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
886 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
887 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
888 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
889 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
890 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
891 ; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store 1 into %ir.addr, align 4)
892 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
893 ; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store 1 into %ir.addr + 4, align 4)
894 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP %0, [[CST2]](s64)
895 ; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store 4 into %ir.addr + 8)
896 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
897 ; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store 4 into %ir.addr + 12)
898 define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
899 %struct = load %struct.nested, %struct.nested* %addr
900 %newstruct = insertvalue %struct.nested %struct, i32 %val, 1, 1
901 store %struct.nested %newstruct, %struct.nested* %addr
905 define [1 x i64] @test_trivial_insert([1 x i64] %s, i64 %val) {
906 ; CHECK-LABEL: name: test_trivial_insert
907 ; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY $x0
908 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = COPY $x1
909 ; CHECK: $x0 = COPY [[VAL]]
910 %res = insertvalue [1 x i64] %s, i64 %val, 0
914 define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
915 ; CHECK-LABEL: name: test_trivial_insert_ptr
916 ; CHECK: [[STRUCT:%[0-9]+]]:_(p0) = COPY $x0
917 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY $x1
918 ; CHECK: $x0 = COPY [[VAL]]
919 %res = insertvalue [1 x i8*] %s, i8* %val, 0
923 ; CHECK-LABEL: name: test_insertvalue_agg
924 ; CHECK: %0:_(p0) = COPY $x0
925 ; CHECK: %1:_(p0) = COPY $x1
926 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load 1 from %ir.addr2, align 4)
927 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
928 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP %1, [[CST1]](s64)
929 ; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.addr2 + 4)
930 ; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
931 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
932 ; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load 1 from %ir.addr + 4, align 4)
933 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
934 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
935 ; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 8)
936 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
937 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP %0, [[CST4]](s64)
938 ; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.addr + 12)
939 ; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store 1 into %ir.addr, align 4)
940 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP %0, [[CST1]](s64)
941 ; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store 1 into %ir.addr + 4, align 4)
942 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP %0, [[CST3]](s64)
943 ; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store 4 into %ir.addr + 8)
944 ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_GEP %0, [[CST4]](s64)
945 ; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store 4 into %ir.addr + 12)
946 define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
947 %smallstruct = load {i8, i32}, {i8, i32}* %addr2
948 %struct = load %struct.nested, %struct.nested* %addr
949 %res = insertvalue %struct.nested %struct, {i8, i32} %smallstruct, 1
950 store %struct.nested %res, %struct.nested* %addr
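; Tests for select on scalars, pointers and vectors.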
954 ; CHECK-LABEL: name: test_select
955 ; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
956 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
957 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w1
958 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w2
959 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
960 ; CHECK: $w0 = COPY [[RES]]
961 define i32 @test_select(i1 %tst, i32 %lhs, i32 %rhs) {
962 %res = select i1 %tst, i32 %lhs, i32 %rhs
966 ; CHECK-LABEL: name: test_select_ptr
967 ; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
968 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
969 ; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x1
970 ; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x2
971 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
972 ; CHECK: $x0 = COPY [[RES]]
973 define i8* @test_select_ptr(i1 %tst, i8* %lhs, i8* %rhs) {
974 %res = select i1 %tst, i8* %lhs, i8* %rhs
978 ; CHECK-LABEL: name: test_select_vec
979 ; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
980 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
981 ; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q0
982 ; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
983 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
984 ; CHECK: $q0 = COPY [[RES]]
985 define <4 x i32> @test_select_vec(i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs) {
986 %res = select i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs
990 ; CHECK-LABEL: name: test_vselect_vec
991 ; CHECK: [[TST32:%[0-9]+]]:_(<4 x s32>) = COPY $q0
992 ; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
993 ; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q2
994 ; CHECK: [[TST:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[TST32]](<4 x s32>)
995 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](<4 x s1>), [[LHS]], [[RHS]]
996 ; CHECK: $q0 = COPY [[RES]]
997 define <4 x i32> @test_vselect_vec(<4 x i32> %tst32, <4 x i32> %lhs, <4 x i32> %rhs) {
998 %tst = trunc <4 x i32> %tst32 to <4 x i1>
999 %res = select <4 x i1> %tst, <4 x i32> %lhs, <4 x i32> %rhs
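; Tests for floating-point <-> integer conversions and fpext/fptrunc.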
1003 ; CHECK-LABEL: name: test_fptosi
1004 ; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
1005 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
1006 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOSI [[FP]](s32)
1007 ; CHECK: $x0 = COPY [[RES]]
1008 define i64 @test_fptosi(float* %fp.addr) {
1009 %fp = load float, float* %fp.addr
1010 %res = fptosi float %fp to i64
1014 ; CHECK-LABEL: name: test_fptoui
1015 ; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
1016 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
1017 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOUI [[FP]](s32)
1018 ; CHECK: $x0 = COPY [[RES]]
1019 define i64 @test_fptoui(float* %fp.addr) {
1020 %fp = load float, float* %fp.addr
1021 %res = fptoui float %fp to i64
1025 ; CHECK-LABEL: name: test_sitofp
1026 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1027 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
1028 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_SITOFP [[IN]](s32)
1029 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
1030 define void @test_sitofp(double* %addr, i32 %in) {
1031 %fp = sitofp i32 %in to double
1032 store double %fp, double* %addr
1036 ; CHECK-LABEL: name: test_uitofp
1037 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1038 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
1039 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_UITOFP [[IN]](s32)
1040 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
1041 define void @test_uitofp(double* %addr, i32 %in) {
1042 %fp = uitofp i32 %in to double
1043 store double %fp, double* %addr
1047 ; CHECK-LABEL: name: test_fpext
1048 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $s0
1049 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPEXT [[IN]](s32)
1050 ; CHECK: $d0 = COPY [[RES]]
1051 define double @test_fpext(float %in) {
1052 %res = fpext float %in to double
1056 ; CHECK-LABEL: name: test_fptrunc
1057 ; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY $d0
1058 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPTRUNC [[IN]](s64)
1059 ; CHECK: $s0 = COPY [[RES]]
1060 define float @test_fptrunc(double %in) {
1061 %res = fptrunc double %in to float
1065 ; CHECK-LABEL: name: test_constant_float
1066 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1067 ; CHECK: [[TMP:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+00
1068 ; CHECK: G_STORE [[TMP]](s32), [[ADDR]](p0)
1069 define void @test_constant_float(float* %addr) {
1070 store float 1.5, float* %addr
1074 ; CHECK-LABEL: name: float_comparison
1075 ; CHECK: [[LHSADDR:%[0-9]+]]:_(p0) = COPY $x0
1076 ; CHECK: [[RHSADDR:%[0-9]+]]:_(p0) = COPY $x1
1077 ; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY $x2
1078 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = G_LOAD [[LHSADDR]](p0)
1079 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
1080 ; CHECK: [[TST:%[0-9]+]]:_(s1) = nnan ninf nsz arcp contract afn reassoc G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
1081 ; CHECK: G_STORE [[TST]](s1), [[BOOLADDR]](p0)
1082 define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
1083 %a = load float, float* %a.addr
1084 %b = load float, float* %b.addr
1085 %res = fcmp nnan ninf nsz arcp contract afn reassoc oge float %a, %b
1086 store i1 %res, i1* %bool.addr
1090 ; CHECK-LABEL: name: trivial_float_comparison
1091 ; CHECK: [[ENTRY_R1:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
1092 ; CHECK: [[ENTRY_R2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
1093 ; CHECK: [[R1:%[0-9]+]]:_(s1) = COPY [[ENTRY_R1]](s1)
1094 ; CHECK: [[R2:%[0-9]+]]:_(s1) = COPY [[ENTRY_R2]](s1)
1095 ; CHECK: G_ADD [[R1]], [[R2]]
1096 define i1 @trivial_float_comparison(double %a, double %b) {
1097 %r1 = fcmp false double %a, %b
1098 %r2 = fcmp true double %a, %b
1099 %sum = add i1 %r1, %r2
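; Tests for global values, including non-zero address spaces and function pointers.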
1105 define i32* @test_global() {
1106 ; CHECK-LABEL: name: test_global
1107 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var{{$}}
1108 ; CHECK: $x0 = COPY [[TMP]](p0)
1113 @var1 = addrspace(42) global i32 0
1114 define i32 addrspace(42)* @test_global_addrspace() {
1115 ; CHECK-LABEL: name: test_global_addrspace
1116 ; CHECK: [[TMP:%[0-9]+]]:_(p42) = G_GLOBAL_VALUE @var1{{$}}
1117 ; CHECK: $x0 = COPY [[TMP]](p42)
1119 ret i32 addrspace(42)* @var1
1123 define void()* @test_global_func() {
1124 ; CHECK-LABEL: name: test_global_func
1125 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @allocai64{{$}}
1126 ; CHECK: $x0 = COPY [[TMP]](p0)
1128 ret void()* @allocai64
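; Tests for the memcpy/memmove/memset intrinsics: each is translated to G_INTRINSIC_W_SIDE_EFFECTS with store/load memory operands on the destination and source.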
1131 declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
1132 define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
1133 ; CHECK-LABEL: name: test_memcpy
1134 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1135 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
1136 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1137 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
1138 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
1142 declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)*, i8 addrspace(1)*, i64, i1)
1143 define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size) {
1144 ; CHECK-LABEL: name: test_memcpy_nonzero_as
1145 ; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
1146 ; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
1147 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1148 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p1), [[SRC]](p1), [[SIZE]](s64) :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
1149 call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
1153 declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
1154 define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
1155 ; CHECK-LABEL: name: test_memmove
1156 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1157 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
1158 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1159 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
1160 call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
1164 declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
1165 define void @test_memset(i8* %dst, i8 %val, i64 %size) {
1166 ; CHECK-LABEL: name: test_memset
1167 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1168 ; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
1169 ; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
1170 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1171 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[DST]](p0), [[SRC]](s8), [[SIZE]](s64) :: (store 1 into %ir.dst)
1172 call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
1176 declare i64 @llvm.objectsize.i64(i8*, i1)
1177 declare i32 @llvm.objectsize.i32(i8*, i1)
1178 define void @test_objectsize(i8* %addr0, i8* %addr1) {
1179 ; CHECK-LABEL: name: test_objectsize
1180 ; CHECK: [[ADDR0:%[0-9]+]]:_(p0) = COPY $x0
1181 ; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $x1
1182 ; CHECK: {{%[0-9]+}}:_(s64) = G_CONSTANT i64 -1
1183 ; CHECK: {{%[0-9]+}}:_(s64) = G_CONSTANT i64 0
1184 ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 -1
1185 ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 0
1186 %size64.0 = call i64 @llvm.objectsize.i64(i8* %addr0, i1 0)
1187 %size64.intmin = call i64 @llvm.objectsize.i64(i8* %addr0, i1 1)
1188 %size32.0 = call i32 @llvm.objectsize.i32(i8* %addr0, i1 0)
1189 %size32.intmin = call i32 @llvm.objectsize.i32(i8* %addr0, i1 1)
1193 define void @test_large_const(i128* %addr) {
1194 ; CHECK-LABEL: name: test_large_const
1195 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1196 ; CHECK: [[VAL:%[0-9]+]]:_(s128) = G_CONSTANT i128 42
1197 ; CHECK: G_STORE [[VAL]](s128), [[ADDR]](p0)
1198 store i128 42, i128* %addr
1202 ; When there was no formal argument handling (so the first BB was empty) we used
1203 ; to insert the constants at the end of the block, even if they were encountered
1204 ; after the block's terminators had been emitted. Also make sure the order is correct.
1206 define i8* @test_const_placement() {
1207 ; CHECK-LABEL: name: test_const_placement
1208 ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
1209 ; CHECK: [[VAL_INT:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
1210 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = G_INTTOPTR [[VAL_INT]](s32)
1211 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
1215 ret i8* inttoptr(i32 42 to i8*)
1218 declare void @llvm.va_end(i8*)
1219 define void @test_va_end(i8* %list) {
1220 ; CHECK-LABEL: name: test_va_end
1222 ; CHECK-NOT: INTRINSIC
1223 ; CHECK: RET_ReallyLR
1224 call void @llvm.va_end(i8* %list)
1228 define void @test_va_arg(i8* %list) {
1229 ; CHECK-LABEL: name: test_va_arg
1230 ; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
1231 ; CHECK: G_VAARG [[LIST]](p0), 8
1232 ; CHECK: G_VAARG [[LIST]](p0), 1
1233 ; CHECK: G_VAARG [[LIST]](p0), 16
1235 %v0 = va_arg i8* %list, i64
1236 %v1 = va_arg i8* %list, i8
1237 %v2 = va_arg i8* %list, i128
1241 declare float @llvm.pow.f32(float, float)
1242 define float @test_pow_intrin(float %l, float %r) {
1243 ; CHECK-LABEL: name: test_pow_intrin
1244 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
1245 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $s1
1246 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FPOW [[LHS]], [[RHS]]
1247 ; CHECK: $s0 = COPY [[RES]]
1248 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.pow.f32(float %l, float %r)
1252 declare float @llvm.fma.f32(float, float, float)
1253 define float @test_fma_intrin(float %a, float %b, float %c) {
1254 ; CHECK-LABEL: name: test_fma_intrin
1255 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1256 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
1257 ; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $s2
1258 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[A]], [[B]], [[C]]
1259 ; CHECK: $s0 = COPY [[RES]]
1260 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fma.f32(float %a, float %b, float %c)
1264 declare float @llvm.exp.f32(float)
1265 define float @test_exp_intrin(float %a) {
1266 ; CHECK-LABEL: name: test_exp_intrin
1267 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1268 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP [[A]]
1269 ; CHECK: $s0 = COPY [[RES]]
1270 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp.f32(float %a)
1274 declare float @llvm.exp2.f32(float)
1275 define float @test_exp2_intrin(float %a) {
1276 ; CHECK-LABEL: name: test_exp2_intrin
1277 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1278 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP2 [[A]]
1279 ; CHECK: $s0 = COPY [[RES]]
1280 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp2.f32(float %a)
1284 declare float @llvm.log.f32(float)
1285 define float @test_log_intrin(float %a) {
1286 ; CHECK-LABEL: name: test_log_intrin
1287 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1288 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG [[A]]
1289 ; CHECK: $s0 = COPY [[RES]]
1290 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log.f32(float %a)
1294 declare float @llvm.log2.f32(float)
1295 define float @test_log2_intrin(float %a) {
1296 ; CHECK-LABEL: name: test_log2_intrin
1297 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1298 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG2 [[A]]
1299 ; CHECK: $s0 = COPY [[RES]]
1300 %res = call float @llvm.log2.f32(float %a)
1304 declare float @llvm.log10.f32(float)
1305 define float @test_log10_intrin(float %a) {
1306 ; CHECK-LABEL: name: test_log10_intrin
1307 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1308 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG10 [[A]]
1309 ; CHECK: $s0 = COPY [[RES]]
1310 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log10.f32(float %a)
1314 declare float @llvm.fabs.f32(float)
1315 define float @test_fabs_intrin(float %a) {
1316 ; CHECK-LABEL: name: test_fabs_intrin
1317 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1318 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FABS [[A]]
1319 ; CHECK: $s0 = COPY [[RES]]
1320 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fabs.f32(float %a)
1324 declare float @llvm.copysign.f32(float, float)
1325 define float @test_fcopysign_intrin(float %a, float %b) {
1326 ; CHECK-LABEL: name: test_fcopysign_intrin
1327 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1328 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
1329 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FCOPYSIGN [[A]], [[B]]
1330 ; CHECK: $s0 = COPY [[RES]]
1332 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.copysign.f32(float %a, float %b)
1336 declare float @llvm.canonicalize.f32(float)
1337 define float @test_fcanonicalize_intrin(float %a) {
1338 ; CHECK-LABEL: name: test_fcanonicalize_intrin
1339 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1340 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FCANONICALIZE [[A]]
1341 ; CHECK: $s0 = COPY [[RES]]
1342 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.canonicalize.f32(float %a)
1346 declare float @llvm.trunc.f32(float)
1347 define float @test_intrinsic_trunc(float %a) {
1348 ; CHECK-LABEL: name: test_intrinsic_trunc
1349 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1350 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[A]]
1351 ; CHECK: $s0 = COPY [[RES]]
1352 %res = call float @llvm.trunc.f32(float %a)
1356 declare float @llvm.round.f32(float)
1357 define float @test_intrinsic_round(float %a) {
1358 ; CHECK-LABEL: name: test_intrinsic_round
1359 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1360 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_INTRINSIC_ROUND [[A]]
1361 ; CHECK: $s0 = COPY [[RES]]
1362 %res = call float @llvm.round.f32(float %a)
1366 declare i32 @llvm.ctlz.i32(i32, i1)
1367 define i32 @test_ctlz_intrinsic_zero_not_undef(i32 %a) {
1368 ; CHECK-LABEL: name: test_ctlz_intrinsic_zero_not_undef
1369 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1370 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CTLZ [[A]]
1371 ; CHECK: $w0 = COPY [[RES]]
1372 %res = call i32 @llvm.ctlz.i32(i32 %a, i1 0)
1376 declare i32 @llvm.cttz.i32(i32, i1)
1377 define i32 @test_cttz_intrinsic_zero_undef(i32 %a) {
1378 ; CHECK-LABEL: name: test_cttz_intrinsic_zero_undef
1379 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1380 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[A]]
1381 ; CHECK: $w0 = COPY [[RES]]
1382 %res = call i32 @llvm.cttz.i32(i32 %a, i1 1)
1386 declare i32 @llvm.ctpop.i32(i32)
1387 define i32 @test_ctpop_intrinsic(i32 %a) {
1388 ; CHECK-LABEL: name: test_ctpop_intrinsic
1389 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1390 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CTPOP [[A]]
1391 ; CHECK: $w0 = COPY [[RES]]
1392 %res = call i32 @llvm.ctpop.i32(i32 %a)
1396 declare i32 @llvm.bitreverse.i32(i32)
1397 define i32 @test_bitreverse_intrinsic(i32 %a) {
1398 ; CHECK-LABEL: name: test_bitreverse_intrinsic
1399 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1400 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_BITREVERSE [[A]]
1401 ; CHECK: $w0 = COPY [[RES]]
1402 %res = call i32 @llvm.bitreverse.i32(i32 %a)
1406 declare void @llvm.lifetime.start.p0i8(i64, i8*)
1407 declare void @llvm.lifetime.end.p0i8(i64, i8*)
1408 define void @test_lifetime_intrin() {
1409 ; CHECK-LABEL: name: test_lifetime_intrin
1410 ; CHECK: RET_ReallyLR
1411 ; O3-LABEL: name: test_lifetime_intrin
1412 ; O3: {{%[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.0.slot
1413 ; O3-NEXT: LIFETIME_START %stack.0.slot
1414 ; O3-NEXT: LIFETIME_END %stack.0.slot
1415 ; O3-NEXT: RET_ReallyLR
1416 %slot = alloca i8, i32 4
1417 call void @llvm.lifetime.start.p0i8(i64 0, i8* %slot)
1418 call void @llvm.lifetime.end.p0i8(i64 0, i8* %slot)
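; Tests for atomic loads and stores: the ordering and syncscope must be preserved in the memory operands.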
1422 define void @test_load_store_atomics(i8* %addr) {
1423 ; CHECK-LABEL: name: test_load_store_atomics
1424 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1425 ; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
1426 ; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
1427 ; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
1428 ; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
1429 ; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
1430 ; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
1431 %v0 = load atomic i8, i8* %addr unordered, align 1
1432 store atomic i8 %v0, i8* %addr monotonic, align 1
1434 %v1 = load atomic i8, i8* %addr acquire, align 1
1435 store atomic i8 %v1, i8* %addr release, align 1
1437 %v2 = load atomic i8, i8* %addr syncscope("singlethread") seq_cst, align 1
1438 store atomic i8 %v2, i8* %addr syncscope("singlethread") monotonic, align 1
1443 define float @test_fneg_f32(float %x) {
1444 ; CHECK-LABEL: name: test_fneg_f32
1445 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $s0
1446 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FNEG [[ARG]]
1447 ; CHECK: $s0 = COPY [[RES]](s32)
1448 %neg = fsub float -0.000000e+00, %x
1452 define float @test_fneg_f32_fmf(float %x) {
1453 ; CHECK-LABEL: name: test_fneg_f32_fmf
1454 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $s0
1455 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FNEG [[ARG]]
1456 ; CHECK: $s0 = COPY [[RES]](s32)
1457 %neg = fsub fast float -0.000000e+00, %x
1461 define double @test_fneg_f64(double %x) {
1462 ; CHECK-LABEL: name: test_fneg_f64
1463 ; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY $d0
1464 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FNEG [[ARG]]
1465 ; CHECK: $d0 = COPY [[RES]](s64)
1466 %neg = fsub double -0.000000e+00, %x
1470 define double @test_fneg_f64_fmf(double %x) {
1471 ; CHECK-LABEL: name: test_fneg_f64_fmf
1472 ; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY $d0
1473 ; CHECK: [[RES:%[0-9]+]]:_(s64) = nnan ninf nsz arcp contract afn reassoc G_FNEG [[ARG]]
1474 ; CHECK: $d0 = COPY [[RES]](s64)
1475 %neg = fsub fast double -0.000000e+00, %x
1479 define void @test_trivial_inlineasm() {
1480 ; CHECK-LABEL: name: test_trivial_inlineasm
1481 ; CHECK: INLINEASM &wibble, 1
1482 ; CHECK: INLINEASM &wibble, 0
1483 call void asm sideeffect "wibble", ""()
1484 call void asm "wibble", ""()
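; Tests for vector element insertion and extraction.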
1488 define <2 x i32> @test_insertelement(<2 x i32> %vec, i32 %elt, i32 %idx){
1489 ; CHECK-LABEL: name: test_insertelement
1490 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1491 ; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
1492 ; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w1
1493 ; CHECK: [[RES:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[VEC]], [[ELT]](s32), [[IDX]](s32)
1494 ; CHECK: $d0 = COPY [[RES]](<2 x s32>)
1495 %res = insertelement <2 x i32> %vec, i32 %elt, i32 %idx
1499 define i32 @test_extractelement(<2 x i32> %vec, i32 %idx) {
1500 ; CHECK-LABEL: name: test_extractelement
1501 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1502 ; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w0
1503 ; CHECK: [[IDXEXT:%[0-9]+]]:_(s64) = G_SEXT [[IDX]]
1504 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDXEXT]](s64)
1505 ; CHECK: $w0 = COPY [[RES]](s32)
1506 %res = extractelement <2 x i32> %vec, i32 %idx
1510 define i32 @test_extractelement_const_idx(<2 x i32> %vec) {
1511 ; CHECK-LABEL: name: test_extractelement_const_idx
1512 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1513 ; CHECK: [[IDX:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
1514 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDX]](s64)
1515 ; CHECK: $w0 = COPY [[RES]](s32)
1516 %res = extractelement <2 x i32> %vec, i32 1
1520 define i32 @test_singleelementvector(i32 %elt){
1521 ; CHECK-LABEL: name: test_singleelementvector
1522 ; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
1523 ; CHECK-NOT: G_INSERT_VECTOR_ELT
1524 ; CHECK-NOT: G_EXTRACT_VECTOR_ELT
1525 ; CHECK: $w0 = COPY [[ELT]](s32)
1526 %vec = insertelement <1 x i32> undef, i32 %elt, i32 0
1527 %res = extractelement <1 x i32> %vec, i32 0
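; Tests for constant vector translation: zeroinitializer and constant data vectors
; become G_BUILD_VECTOR of G_CONSTANT/G_FCONSTANT, and <1 x i32> constants stay scalar.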
1531 define <2 x i32> @test_constantaggzerovector_v2i32() {
1532 ; CHECK-LABEL: name: test_constantaggzerovector_v2i32
1533 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1534 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32)
1535 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1536 ret <2 x i32> zeroinitializer
1539 define <2 x float> @test_constantaggzerovector_v2f32() {
1540 ; CHECK-LABEL: name: test_constantaggzerovector_v2f32
1541 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
1542 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32)
1543 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1544 ret <2 x float> zeroinitializer
1547 define i32 @test_constantaggzerovector_v3i32() {
1548 ; CHECK-LABEL: name: test_constantaggzerovector_v3i32
1549 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1550 ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32), [[ZERO]](s32)
1551 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
1552 %elt = extractelement <3 x i32> zeroinitializer, i32 1
1556 define <2 x i32> @test_constantdatavector_v2i32() {
1557 ; CHECK-LABEL: name: test_constantdatavector_v2i32
1558 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1559 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1560 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32)
1561 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1562 ret <2 x i32> <i32 1, i32 2>
1565 define i32 @test_constantdatavector_v3i32() {
1566 ; CHECK-LABEL: name: test_constantdatavector_v3i32
1567 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1568 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1569 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
1570 ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C3]](s32)
1571 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
1572 %elt = extractelement <3 x i32> <i32 1, i32 2, i32 3>, i32 1
1576 define <4 x i32> @test_constantdatavector_v4i32() {
1577 ; CHECK-LABEL: name: test_constantdatavector_v4i32
1578 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1579 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1580 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
1581 ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1582 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
1583 ; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
1584 ret <4 x i32> <i32 1, i32 2, i32 3, i32 4>
1587 define <2 x double> @test_constantdatavector_v2f64() {
1588 ; CHECK-LABEL: name: test_constantdatavector_v2f64
1589 ; CHECK: [[FC1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
1590 ; CHECK: [[FC2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1591 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FC1]](s64), [[FC2]](s64)
1592 ; CHECK: $q0 = COPY [[VEC]](<2 x s64>)
1593 ret <2 x double> <double 1.0, double 2.0>
1596 define i32 @test_constantaggzerovector_v1s32(i32 %arg){
1597 ; CHECK-LABEL: name: test_constantaggzerovector_v1s32
1598 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
1599 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1600 ; CHECK-NOT: G_MERGE_VALUES
1601 ; CHECK: G_ADD [[ARG]], [[C0]]
1602 %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
1603 %add = add <1 x i32> %vec, zeroinitializer
1604 %res = extractelement <1 x i32> %add, i32 0
1608 define i32 @test_constantdatavector_v1s32(i32 %arg){
1609 ; CHECK-LABEL: name: test_constantdatavector_v1s32
1610 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
1611 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1612 ; CHECK-NOT: G_MERGE_VALUES
1613 ; CHECK: G_ADD [[ARG]], [[C1]]
1614 %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
1615 %add = add <1 x i32> %vec, <i32 1>
1616 %res = extractelement <1 x i32> %add, i32 0
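; Check that a call using a different calling convention (ghccc) passes its
; argument in that convention's register ($s8).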
1620 declare ghccc float @different_call_conv_target(float %x)
1621 define float @test_different_call_conv_target(float %x) {
1622 ; CHECK-LABEL: name: test_different_call_conv_target
1623 ; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $s0
1624 ; CHECK: $s8 = COPY [[X]]
1625 ; CHECK: BL @different_call_conv_target, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s8, implicit-def $s0
1626 %res = call ghccc float @different_call_conv_target(float %x)
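; Tests for shufflevector translation to G_SHUFFLE_VECTOR with various
; source/destination widths and undef mask elements.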
1630 define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
1631 ; CHECK-LABEL: name: test_shufflevector_s32_v2s32
1632 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
1633 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
1634 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], shufflemask(0, 0)
1635 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1636 %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
1637 %res = shufflevector <1 x i32> %vec, <1 x i32> undef, <2 x i32> zeroinitializer
1641 define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
1642 ; CHECK-LABEL: name: test_shufflevector_v2s32_s32
1643 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1644 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(1)
1645 ; CHECK: $w0 = COPY [[RES]](s32)
1646 %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <1 x i32> <i32 1>
1647 %res = extractelement <1 x i32> %vec, i32 0
1651 define <2 x i32> @test_shufflevector_v2s32_v2s32_undef(<2 x i32> %arg) {
1652 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32_undef
1653 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1654 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1655 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(undef, undef)
1656 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1657 %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> undef
1661 define <2 x i32> @test_shufflevector_v2s32_v2s32_undef_0(<2 x i32> %arg) {
1662 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32_undef_0
1663 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1664 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1665 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(undef, 0)
1666 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1667 %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 undef, i32 0>
1671 define <2 x i32> @test_shufflevector_v2s32_v2s32_0_undef(<2 x i32> %arg) {
1672 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32_0_undef
1673 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1674 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1675 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(0, undef)
1676 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1677 %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 0, i32 undef>
1681 define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
1682 ; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
1683 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1684 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1685 ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(1, 0, 1)
1686 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
1687 %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
1688 %res = extractelement <3 x i32> %vec, i32 0
1692 define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg2) {
1693 ; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
1694 ; CHECK: [[ARG1:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1695 ; CHECK: [[ARG2:%[0-9]+]]:_(<2 x s32>) = COPY $d1
1696 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[ARG1]](<2 x s32>), [[ARG2]], shufflemask(0, 1, 2, 3)
1697 ; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
1698 %res = shufflevector <2 x i32> %arg1, <2 x i32> %arg2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
1702 define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
1703 ; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
1704 ; CHECK: [[ARG:%[0-9]+]]:_(<4 x s32>) = COPY $q0
1705 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
1706 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<4 x s32>), [[UNDEF]], shufflemask(1, 3)
1707 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1708 %res = shufflevector <4 x i32> %arg, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
1713 define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2) {
1714 ; CHECK-LABEL: name: test_shufflevector_v8s8_v16s8
1715 ; CHECK: [[ARG1:%[0-9]+]]:_(<8 x s8>) = COPY $d0
1716 ; CHECK: [[ARG2:%[0-9]+]]:_(<8 x s8>) = COPY $d1
1717 ; CHECK: [[VEC:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[ARG1]](<8 x s8>), [[ARG2]], shufflemask(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15)
1718 ; CHECK: $q0 = COPY [[VEC]](<16 x s8>)
1719 %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
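; Check that a constant vector containing undef elements uses G_IMPLICIT_DEF for those lanes.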
1723 ; CHECK-LABEL: name: test_constant_vector
1724 ; CHECK: [[UNDEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
1725 ; CHECK: [[F:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
1726 ; CHECK: [[M:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
1727 ; CHECK: $d0 = COPY [[M]](<4 x s16>)
1728 define <4 x half> @test_constant_vector() {
1729 ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
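; Check that a target memory intrinsic (llvm.aarch64.ldxr) is translated to
; G_INTRINSIC_W_SIDE_EFFECTS with the appropriate memory operand.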
1732 define i32 @test_target_mem_intrinsic(i32* %addr) {
1733 ; CHECK-LABEL: name: test_target_mem_intrinsic
1734 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1735 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load 4 from %ir.addr)
1736 ; CHECK: G_TRUNC [[VAL]](s64)
1737 %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
1738 %trunc = trunc i64 %val to i32
1742 declare i64 @llvm.aarch64.ldxr.p0i32(i32*) nounwind
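; Check that loads and stores of an empty (zero-size) type generate no instructions.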
1744 %zerosize_type = type {}
1746 define %zerosize_type @test_empty_load_store(%zerosize_type *%ptr, %zerosize_type %in) noinline optnone {
1747 ; CHECK-LABEL: name: test_empty_load_store
1748 ; CHECK-NOT: G_STORE
1750 ; CHECK: RET_ReallyLR
1752 store %zerosize_type undef, %zerosize_type* undef, align 4
1753 %val = load %zerosize_type, %zerosize_type* %ptr, align 4
1754 ret %zerosize_type %in
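; Tests for PHI translation, including PHIs of aggregate values that are split
; into one G_PHI per field.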
1758 define i64 @test_phi_loop(i32 %n) {
1759 ; CHECK-LABEL: name: test_phi_loop
1760 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
1761 ; CHECK: [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1762 ; CHECK: [[CST2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1763 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
1764 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
1766 ; CHECK: [[PN1:%[0-9]+]]:_(s32) = G_PHI [[ARG1]](s32), %bb.1, [[SUB:%[0-9]+]](s32), %bb.2
1767 ; CHECK: [[PN2:%[0-9]+]]:_(s64) = G_PHI [[CST3]](s64), %bb.1, [[PN3:%[0-9]+]](s64), %bb.2
1768 ; CHECK: [[PN3]]:_(s64) = G_PHI [[CST4]](s64), %bb.1, [[ADD:%[0-9]+]](s64), %bb.2
1769 ; CHECK: [[ADD]]:_(s64) = G_ADD [[PN2]], [[PN3]]
1770 ; CHECK: [[SUB]]:_(s32) = G_SUB [[PN1]], [[CST1]]
1771 ; CHECK: [[CMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PN1]](s32), [[CST2]]
1772 ; CHECK: G_BRCOND [[CMP]](s1), %bb.3
1775 ; CHECK: $x0 = COPY [[PN2]](s64)
1776 ; CHECK: RET_ReallyLR implicit $x0
1781 %counter = phi i32 [ %n, %entry ], [ %counter.dec, %loop ]
1782 %elem = phi { i64, i64 } [ { i64 0, i64 1 }, %entry ], [ %updated, %loop ]
1783 %prev = extractvalue { i64, i64 } %elem, 0
1784 %curr = extractvalue { i64, i64 } %elem, 1
1785 %next = add i64 %prev, %curr
1786 %shifted = insertvalue { i64, i64 } %elem, i64 %curr, 0
1787 %updated = insertvalue { i64, i64 } %shifted, i64 %next, 1
1788 %counter.dec = sub i32 %counter, 1
1789 %cond = icmp sle i32 %counter, 0
1790 br i1 %cond, label %exit, label %loop
1793 %res = extractvalue { i64, i64 } %elem, 0
1797 define void @test_phi_diamond({ i8, i16, i32 }* %a.ptr, { i8, i16, i32 }* %b.ptr, i1 %selector, { i8, i16, i32 }* %dst) {
1798 ; CHECK-LABEL: name: test_phi_diamond
1799 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
1800 ; CHECK: [[ARG2:%[0-9]+]]:_(p0) = COPY $x1
1801 ; CHECK: [[ARG3:%[0-9]+]]:_(s32) = COPY $w2
1802 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ARG3]](s32)
1803 ; CHECK: [[ARG4:%[0-9]+]]:_(p0) = COPY $x3
1804 ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.2
1807 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load 1 from %ir.a.ptr, align 4)
1808 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
1809 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[ARG1]], [[CST1]](s64)
1810 ; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load 2 from %ir.a.ptr + 2)
1811 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1812 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ARG1]], [[CST2]](s64)
1813 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.a.ptr + 4)
1816 ; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load 1 from %ir.b.ptr, align 4)
1817 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
1818 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[ARG2]], [[CST3]](s64)
1819 ; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load 2 from %ir.b.ptr + 2)
1820 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1821 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[ARG2]], [[CST4]](s64)
1822 ; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.b.ptr + 4)
1824 ; CHECK: [[PN1:%[0-9]+]]:_(s8) = G_PHI [[LD1]](s8), %bb.2, [[LD4]](s8), %bb.3
1825 ; CHECK: [[PN2:%[0-9]+]]:_(s16) = G_PHI [[LD2]](s16), %bb.2, [[LD5]](s16), %bb.3
1826 ; CHECK: [[PN3:%[0-9]+]]:_(s32) = G_PHI [[LD3]](s32), %bb.2, [[LD6]](s32), %bb.3
1827 ; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store 1 into %ir.dst, align 4)
1828 ; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
1829 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[ARG4]], [[CST5]](s64)
1830 ; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store 2 into %ir.dst + 2)
1831 ; CHECK: [[CST6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1832 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[ARG4]], [[CST6]](s64)
1833 ; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store 4 into %ir.dst + 4)
1834 ; CHECK: RET_ReallyLR
1837 br i1 %selector, label %store.a, label %store.b
1840 %a = load { i8, i16, i32 }, { i8, i16, i32 }* %a.ptr
1844 %b = load { i8, i16, i32 }, { i8, i16, i32 }* %b.ptr
1848 %v = phi { i8, i16, i32 } [ %a, %store.a ], [ %b, %store.b ]
1849 store { i8, i16, i32 } %v, { i8, i16, i32 }* %dst
1853 %agg.inner.inner = type {i64, i64}
1854 %agg.inner = type {i16, i8, %agg.inner.inner }
1855 %agg.nested = type {i32, i32, %agg.inner, i32}
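; Check that storing a nested aggregate constant is split into per-field G_STOREs
; at the correct offsets.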
1857 define void @test_nested_aggregate_const(%agg.nested *%ptr) {
1858 ; CHECK-LABEL: name: test_nested_aggregate_const
1859 ; CHECK: [[BASE:%[0-9]+]]:_(p0) = COPY $x0
1860 ; CHECK: [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1861 ; CHECK: [[CST2:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
1862 ; CHECK: [[CST3:%[0-9]+]]:_(s8) = G_CONSTANT i8 3
1863 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
1864 ; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1865 ; CHECK: [[CST6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
1866 ; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store 4 into %ir.ptr, align 8)
1867 ; CHECK: [[CST7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1868 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST7]](s64)
1869 ; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store 4 into %ir.ptr + 4)
1870 ; CHECK: [[CST8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1871 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST8]](s64)
1872 ; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store 2 into %ir.ptr + 8, align 8)
1873 ; CHECK: [[CST9:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
1874 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST9]](s64)
1875 ; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store 1 into %ir.ptr + 10, align 2)
1876 ; CHECK: [[CST10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
1877 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST10]](s64)
1878 ; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store 8 into %ir.ptr + 16)
1879 ; CHECK: [[CST11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
1880 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST11]](s64)
1881 ; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store 8 into %ir.ptr + 24)
1882 ; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
1883 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[CST12]](s64)
1884 ; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store 4 into %ir.ptr + 32, align 8)
1885 store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, %agg.nested *%ptr
1889 define i1 @return_i1_zext() {
1890 ; AAPCS ABI says that booleans can only be 1 or 0, so we need to zero-extend.
1891 ; CHECK-LABEL: name: return_i1_zext
1892 ; CHECK: [[CST:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
1893 ; CHECK: [[ZEXT:%[0-9]+]]:_(s8) = G_ZEXT [[CST]](s1)
1894 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s8)
1895 ; CHECK: $w0 = COPY [[ANYEXT]](s32)
1896 ; CHECK: RET_ReallyLR implicit $w0
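; Try a simple cmpxchg with monotonic orderings.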
1901 define i32 @test_atomic_cmpxchg_1(i32* %addr) {
1902 ; CHECK-LABEL: name: test_atomic_cmpxchg_1
1903 ; CHECK: bb.1.entry:
1904 ; CHECK-NEXT: successors: %bb.{{[^)]+}}
1905 ; CHECK-NEXT: liveins: $x0
1906 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1907 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1908 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1909 ; CHECK: bb.2.repeat:
1910 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
1911 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic 4 on %ir.addr)
1912 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
1913 ; CHECK-NEXT: G_BR %bb.2
1918 %val_success = cmpxchg i32* %addr, i32 0, i32 1 monotonic monotonic
1919 %value_loaded = extractvalue { i32, i1 } %val_success, 0
1920 %success = extractvalue { i32, i1 } %val_success, 1
1921 br i1 %success, label %done, label %repeat
1923 ret i32 %value_loaded
1926 ; Try one cmpxchg with a small type and high atomic ordering.
1927 define i16 @test_atomic_cmpxchg_2(i16* %addr) {
1928 ; CHECK-LABEL: name: test_atomic_cmpxchg_2
1929 ; CHECK: bb.1.entry:
1930 ; CHECK-NEXT: successors: %bb.2({{[^)]+}})
1931 ; CHECK-NEXT: liveins: $x0
1932 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1933 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
1934 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
1935 ; CHECK: bb.2.repeat:
1936 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
1937 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s16), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst seq_cst 2 on %ir.addr)
1938 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
1939 ; CHECK-NEXT: G_BR %bb.2
1944 %val_success = cmpxchg i16* %addr, i16 0, i16 1 seq_cst seq_cst
1945 %value_loaded = extractvalue { i16, i1 } %val_success, 0
1946 %success = extractvalue { i16, i1 } %val_success, 1
1947 br i1 %success, label %done, label %repeat
1949 ret i16 %value_loaded
1952 ; Try one cmpxchg where the success order and failure order differ.
1953 define i64 @test_atomic_cmpxchg_3(i64* %addr) {
1954 ; CHECK-LABEL: name: test_atomic_cmpxchg_3
1955 ; CHECK: bb.1.entry:
1956 ; CHECK-NEXT: successors: %bb.2({{[^)]+}})
1957 ; CHECK-NEXT: liveins: $x0
1958 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1959 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
1960 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
1961 ; CHECK: bb.2.repeat:
1962 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
1963 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s64), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst acquire 8 on %ir.addr)
1964 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
1965 ; CHECK-NEXT: G_BR %bb.2
1970 %val_success = cmpxchg i64* %addr, i64 0, i64 1 seq_cst acquire
1971 %value_loaded = extractvalue { i64, i1 } %val_success, 0
1972 %success = extractvalue { i64, i1 } %val_success, 1
1973 br i1 %success, label %done, label %repeat
1975 ret i64 %value_loaded
1978 ; Try a monotonic atomicrmw xchg
1979 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
1980 define i32 @test_atomicrmw_xchg(i256* %addr) {
1981 ; CHECK-LABEL: name: test_atomicrmw_xchg
1982 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
1983 ; CHECK-NEXT: liveins: $x0
1984 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1985 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
1986 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XCHG [[ADDR]](p0), [[VAL]] :: (load store monotonic 32 on %ir.addr)
1987 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
1988 %oldval = atomicrmw xchg i256* %addr, i256 1 monotonic
1989 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
1990 ; test so work around it by truncating to i32 for now.
1991 %oldval.trunc = trunc i256 %oldval to i32
1992 ret i32 %oldval.trunc
1995 ; Try an acquire atomicrmw add
1996 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
1997 define i32 @test_atomicrmw_add(i256* %addr) {
1998 ; CHECK-LABEL: name: test_atomicrmw_add
1999 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2000 ; CHECK-NEXT: liveins: $x0
2001 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2002 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2003 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_ADD [[ADDR]](p0), [[VAL]] :: (load store acquire 32 on %ir.addr)
2004 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2005 %oldval = atomicrmw add i256* %addr, i256 1 acquire
2006 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2007 ; test so work around it by truncating to i32 for now.
2008 %oldval.trunc = trunc i256 %oldval to i32
2009 ret i32 %oldval.trunc
2012 ; Try a release atomicrmw sub
2013 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2014 define i32 @test_atomicrmw_sub(i256* %addr) {
2015 ; CHECK-LABEL: name: test_atomicrmw_sub
2016 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2017 ; CHECK-NEXT: liveins: $x0
2018 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2019 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2020 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_SUB [[ADDR]](p0), [[VAL]] :: (load store release 32 on %ir.addr)
2021 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2022 %oldval = atomicrmw sub i256* %addr, i256 1 release
2023 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2024 ; test so work around it by truncating to i32 for now.
2025 %oldval.trunc = trunc i256 %oldval to i32
2026 ret i32 %oldval.trunc
2029 ; Try an acq_rel atomicrmw and
2030 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2031 define i32 @test_atomicrmw_and(i256* %addr) {
2032 ; CHECK-LABEL: name: test_atomicrmw_and
2033 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2034 ; CHECK-NEXT: liveins: $x0
2035 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2036 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2037 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_AND [[ADDR]](p0), [[VAL]] :: (load store acq_rel 32 on %ir.addr)
2038 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2039 %oldval = atomicrmw and i256* %addr, i256 1 acq_rel
2040 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2041 ; test so work around it by truncating to i32 for now.
2042 %oldval.trunc = trunc i256 %oldval to i32
2043 ret i32 %oldval.trunc
2046 ; Try a seq_cst atomicrmw nand
2047 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2048 define i32 @test_atomicrmw_nand(i256* %addr) {
2049 ; CHECK-LABEL: name: test_atomicrmw_nand
2050 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2051 ; CHECK-NEXT: liveins: $x0
2052 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2053 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2054 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_NAND [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2055 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2056 %oldval = atomicrmw nand i256* %addr, i256 1 seq_cst
2057 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2058 ; test so work around it by truncating to i32 for now.
2059 %oldval.trunc = trunc i256 %oldval to i32
2060 ret i32 %oldval.trunc
2063 ; Try a seq_cst atomicrmw or
2064 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2065 define i32 @test_atomicrmw_or(i256* %addr) {
2066 ; CHECK-LABEL: name: test_atomicrmw_or
2067 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2068 ; CHECK-NEXT: liveins: $x0
2069 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2070 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2071 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_OR [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2072 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2073 %oldval = atomicrmw or i256* %addr, i256 1 seq_cst
2074 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2075 ; test so work around it by truncating to i32 for now.
2076 %oldval.trunc = trunc i256 %oldval to i32
2077 ret i32 %oldval.trunc
2080 ; Try a seq_cst atomicrmw xor
2081 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2082 define i32 @test_atomicrmw_xor(i256* %addr) {
2083 ; CHECK-LABEL: name: test_atomicrmw_xor
2084 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2085 ; CHECK-NEXT: liveins: $x0
2086 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2087 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2088 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XOR [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2089 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2090 %oldval = atomicrmw xor i256* %addr, i256 1 seq_cst
2091 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2092 ; test so work around it by truncating to i32 for now.
2093 %oldval.trunc = trunc i256 %oldval to i32
2094 ret i32 %oldval.trunc
2097 ; Try a seq_cst atomicrmw min
2098 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2099 define i32 @test_atomicrmw_min(i256* %addr) {
2100 ; CHECK-LABEL: name: test_atomicrmw_min
2101 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2102 ; CHECK-NEXT: liveins: $x0
2103 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2104 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2105 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2106 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2107 %oldval = atomicrmw min i256* %addr, i256 1 seq_cst
2108 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2109 ; test so work around it by truncating to i32 for now.
2110 %oldval.trunc = trunc i256 %oldval to i32
2111 ret i32 %oldval.trunc
2114 ; Try a seq_cst atomicrmw max
2115 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2116 define i32 @test_atomicrmw_max(i256* %addr) {
2117 ; CHECK-LABEL: name: test_atomicrmw_max
2118 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2119 ; CHECK-NEXT: liveins: $x0
2120 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2121 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2122 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2123 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2124 %oldval = atomicrmw max i256* %addr, i256 1 seq_cst
2125 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2126 ; test so work around it by truncating to i32 for now.
2127 %oldval.trunc = trunc i256 %oldval to i32
2128 ret i32 %oldval.trunc
2131 ; Try a seq_cst atomicrmw unsigned min
2132 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2133 define i32 @test_atomicrmw_umin(i256* %addr) {
2134 ; CHECK-LABEL: name: test_atomicrmw_umin
2135 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2136 ; CHECK-NEXT: liveins: $x0
2137 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2138 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2139 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2140 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2141 %oldval = atomicrmw umin i256* %addr, i256 1 seq_cst
2142 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2143 ; test so work around it by truncating to i32 for now.
2144 %oldval.trunc = trunc i256 %oldval to i32
2145 ret i32 %oldval.trunc
2148 ; Try a seq_cst atomicrmw unsigned max
2149 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2150 define i32 @test_atomicrmw_umax(i256* %addr) {
2151 ; CHECK-LABEL: name: test_atomicrmw_umax
2152 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2153 ; CHECK-NEXT: liveins: $x0
2154 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2155 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2156 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
2157 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2158 %oldval = atomicrmw umax i256* %addr, i256 1 seq_cst
2159 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2160 ; test so work around it by truncating to i32 for now.
2161 %oldval.trunc = trunc i256 %oldval to i32
2162 ret i32 %oldval.trunc
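; Check that blockaddress constants are translated to G_BLOCK_ADDR.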
2165 @addr = global i8* null
2167 define void @test_blockaddress() {
2168 ; CHECK-LABEL: name: test_blockaddress
2169 ; CHECK: [[BADDR:%[0-9]+]]:_(p0) = G_BLOCK_ADDR blockaddress(@test_blockaddress, %ir-block.block)
2170 ; CHECK: G_STORE [[BADDR]](p0)
2171 store i8* blockaddress(@test_blockaddress, %block), i8** @addr
2172 indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
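; Check that the llvm.invariant.start/end intrinsics are dropped during translation.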
2178 declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) readonly nounwind
2179 declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
2180 define void @test_invariant_intrin() {
2181 ; CHECK-LABEL: name: test_invariant_intrin
2182 ; CHECK: %{{[0-9]+}}:_(s64) = G_IMPLICIT_DEF
2183 ; CHECK-NEXT: RET_ReallyLR
2185 %y = bitcast %t* %x to i8*
2186 %inv = call {}* @llvm.invariant.start.p0i8(i64 8, i8* %y)
2187 call void @llvm.invariant.end.p0i8({}* %inv, i64 8, i8* %y)
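; Tests for floating-point intrinsics that map directly to generic opcodes
; (G_FCEIL, G_FCOS, G_FSIN, G_FSQRT, G_FFLOOR, G_FNEARBYINT, G_FRINT).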
2191 declare float @llvm.ceil.f32(float)
2192 define float @test_ceil_f32(float %x) {
2193 ; CHECK-LABEL: name: test_ceil_f32
2194 ; CHECK: %{{[0-9]+}}:_(s32) = G_FCEIL %{{[0-9]+}}
2195 %y = call float @llvm.ceil.f32(float %x)
2199 declare double @llvm.ceil.f64(double)
2200 define double @test_ceil_f64(double %x) {
2201 ; CHECK-LABEL: name: test_ceil_f64
2202 ; CHECK: %{{[0-9]+}}:_(s64) = G_FCEIL %{{[0-9]+}}
2203 %y = call double @llvm.ceil.f64(double %x)
2207 declare <2 x float> @llvm.ceil.v2f32(<2 x float>)
2208 define <2 x float> @test_ceil_v2f32(<2 x float> %x) {
2209 ; CHECK-LABEL: name: test_ceil_v2f32
2210 ; CHECK: %{{[0-9]+}}:_(<2 x s32>) = G_FCEIL %{{[0-9]+}}
2211 %y = call <2 x float> @llvm.ceil.v2f32(<2 x float> %x)
2215 declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
2216 define <4 x float> @test_ceil_v4f32(<4 x float> %x) {
2217 ; CHECK-LABEL: name: test_ceil_v4f32
2218 ; CHECK: %{{[0-9]+}}:_(<4 x s32>) = G_FCEIL %{{[0-9]+}}
2219 ; SELECT: %{{[0-9]+}}:fpr128 = FRINTPv4f32 %{{[0-9]+}}
2220 %y = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x)
2224 declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
2225 define <2 x double> @test_ceil_v2f64(<2 x double> %x) {
2226 ; CHECK-LABEL: name: test_ceil_v2f64
2227 ; CHECK: %{{[0-9]+}}:_(<2 x s64>) = G_FCEIL %{{[0-9]+}}
2228 %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x)
2232 declare float @llvm.cos.f32(float)
2233 define float @test_cos_f32(float %x) {
2234 ; CHECK-LABEL: name: test_cos_f32
2235 ; CHECK: %{{[0-9]+}}:_(s32) = G_FCOS %{{[0-9]+}}
2236 %y = call float @llvm.cos.f32(float %x)
2240 declare float @llvm.sin.f32(float)
2241 define float @test_sin_f32(float %x) {
2242 ; CHECK-LABEL: name: test_sin_f32
2243 ; CHECK: %{{[0-9]+}}:_(s32) = G_FSIN %{{[0-9]+}}
2244 %y = call float @llvm.sin.f32(float %x)
2248 declare float @llvm.sqrt.f32(float)
2249 define float @test_sqrt_f32(float %x) {
2250 ; CHECK-LABEL: name: test_sqrt_f32
2251 ; CHECK: %{{[0-9]+}}:_(s32) = G_FSQRT %{{[0-9]+}}
2252 %y = call float @llvm.sqrt.f32(float %x)
2256 declare float @llvm.floor.f32(float)
2257 define float @test_floor_f32(float %x) {
2258 ; CHECK-LABEL: name: test_floor_f32
2259 ; CHECK: %{{[0-9]+}}:_(s32) = G_FFLOOR %{{[0-9]+}}
2260 %y = call float @llvm.floor.f32(float %x)
2264 declare float @llvm.nearbyint.f32(float)
2265 define float @test_nearbyint_f32(float %x) {
2266 ; CHECK-LABEL: name: test_nearbyint_f32
2267 ; CHECK: %{{[0-9]+}}:_(s32) = G_FNEARBYINT %{{[0-9]+}}
2268 %y = call float @llvm.nearbyint.f32(float %x)
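; Check that llvm.aarch64.neon.ld3 is translated to G_INTRINSIC_W_SIDE_EFFECTS
; with a memory operand covering all three results.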
2272 ; CHECK-LABEL: name: test_llvm.aarch64.neon.ld3.v4i32.p0i32
2273 ; CHECK: %1:_(<4 x s32>), %2:_(<4 x s32>), %3:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld3), %0(p0) :: (load 48 from %ir.ptr, align 64)
2274 define void @test_llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr) {
2275 %arst = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr)
2279 declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*) #3
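; Check that an i1 call argument is zero-extended, since booleans must be 0 or 1 under the AAPCS.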
2281 define void @test_i1_arg_zext(void (i1)* %f) {
2282 ; CHECK-LABEL: name: test_i1_arg_zext
2283 ; CHECK: [[I1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
2284 ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[I1]](s1)
2285 ; CHECK: $w0 = COPY [[ZEXT]](s32)
2286 call void %f(i1 true)
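; Check that llvm.stacksave/llvm.stackrestore are translated to copies of $sp.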
2290 declare i8* @llvm.stacksave()
2291 declare void @llvm.stackrestore(i8*)
2292 define void @test_stacksaverestore() {
2293 ; CHECK-LABEL: name: test_stacksaverestore
2294 ; CHECK: [[SAVE:%[0-9]+]]:_(p0) = COPY $sp
2295 ; CHECK-NEXT: $sp = COPY [[SAVE]](p0)
2296 ; CHECK-NEXT: RET_ReallyLR
2297 %sp = call i8* @llvm.stacksave()
2298 call void @llvm.stackrestore(i8* %sp)
2302 declare float @llvm.rint.f32(float)
2303 define float @test_rint_f32(float %x) {
2304 ; CHECK-LABEL: name: test_rint_f32
2305 ; CHECK: %{{[0-9]+}}:_(s32) = G_FRINT %{{[0-9]+}}
2306 %y = call float @llvm.rint.f32(float %x)
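; Check that llvm.assume, llvm.sideeffect and llvm.var.annotation are ignored by the translator.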
2310 declare void @llvm.assume(i1)
2311 define void @test_assume(i1 %x) {
2312 ; CHECK-LABEL: name: test_assume
2313 ; CHECK-NOT: llvm.assume
2314 ; CHECK: RET_ReallyLR
2315 call void @llvm.assume(i1 %x)
2319 declare void @llvm.sideeffect()
2320 define void @test_sideeffect() {
2321 ; CHECK-LABEL: name: test_sideeffect
2322 ; CHECK-NOT: llvm.sideeffect
2323 ; CHECK: RET_ReallyLR
2324 call void @llvm.sideeffect()
2328 declare void @llvm.var.annotation(i8*, i8*, i8*, i32)
2329 define void @test_var_annotation(i8*, i8*, i8*, i32) {
2330 ; CHECK-LABEL: name: test_var_annotation
2331 ; CHECK-NOT: llvm.var.annotation
2332 ; CHECK: RET_ReallyLR
2333 call void @llvm.var.annotation(i8* %0, i8* %1, i8* %2, i32 %3)
2337 !0 = !{ i64 0, i64 2 }