1 ; RUN: llc -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
2 ; RUN: llc -O3 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefix=O3
4 ; This file checks that the translation from llvm IR to generic MachineInstr
6 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
7 target triple = "aarch64--"
10 ; CHECK-LABEL: name: addi64
11 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
12 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
13 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_ADD [[ARG1]], [[ARG2]]
14 ; CHECK-NEXT: $x0 = COPY [[RES]]
15 ; CHECK-NEXT: RET_ReallyLR implicit $x0
16 define i64 @addi64(i64 %arg1, i64 %arg2) {
17 %res = add i64 %arg1, %arg2
21 ; CHECK-LABEL: name: muli64
22 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
23 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
24 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_MUL [[ARG1]], [[ARG2]]
25 ; CHECK-NEXT: $x0 = COPY [[RES]]
26 ; CHECK-NEXT: RET_ReallyLR implicit $x0
27 define i64 @muli64(i64 %arg1, i64 %arg2) {
28 %res = mul i64 %arg1, %arg2
33 ; CHECK-LABEL: name: allocai64
35 ; CHECK-NEXT: - { id: 0, name: ptr1, type: default, offset: 0, size: 8, alignment: 8,
36 ; CHECK-NEXT: stack-id: default, callee-saved-register: '', callee-saved-restored: true,
37 ; CHECK-NEXT: debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
38 ; CHECK-NEXT: - { id: 1, name: ptr2, type: default, offset: 0, size: 8, alignment: 1,
39 ; CHECK-NEXT: stack-id: default, callee-saved-register: '', callee-saved-restored: true,
40 ; CHECK-NEXT: debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
41 ; CHECK-NEXT: - { id: 2, name: ptr3, type: default, offset: 0, size: 128, alignment: 8,
42 ; CHECK-NEXT: stack-id: default, callee-saved-register: '', callee-saved-restored: true,
43 ; CHECK-NEXT: debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
44 ; CHECK-NEXT: - { id: 3, name: ptr4, type: default, offset: 0, size: 1, alignment: 8,
45 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.0.ptr1
46 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.1.ptr2
47 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.2.ptr3
48 ; CHECK: %{{[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.3.ptr4
49 define void @allocai64() {
51 %ptr2 = alloca i64, align 1
52 %ptr3 = alloca i64, i32 16
53 %ptr4 = alloca [0 x i64]
58 ; CHECK-LABEL: name: uncondbr
61 ; ABI/constant lowering and IR-level entry basic block.
62 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
64 ; Make sure we have one successor and only one.
65 ; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+]](0x80000000)
67 ; Check that we emit the correct branch.
68 ; CHECK: G_BR %[[BB2]]
70 ; Check that end contains the return instruction.
71 ; CHECK: [[END:bb.[0-9]+]].{{[a-zA-Z0-9.]+}}:
72 ; CHECK-NEXT: RET_ReallyLR
74 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
75 ; CHECK-NEXT: successors: %[[END]](0x80000000)
76 ; CHECK: G_BR %[[END]]
77 define void @uncondbr() {
86 ; CHECK-LABEL: name: uncondbr_fallthrough
88 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
89 ; CHECK-NEXT: successors: %[[END:bb.[0-9]+]](0x80000000)
90 ; CHECK: [[END]].{{[a-zA-Z0-9.]+}}:
91 ; CHECK-NEXT: RET_ReallyLR
92 define void @uncondbr_fallthrough() {
99 ; Tests for conditional br.
100 ; CHECK-LABEL: name: condbr
103 ; ABI/constant lowering and IR-level entry basic block.
104 ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
105 ; Make sure we have two successors
106 ; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000),
107 ; CHECK: %[[FALSE:bb.[0-9]+]](0x40000000)
109 ; CHECK: [[ADDR:%.*]]:_(p0) = COPY $x0
111 ; Check that we emit the correct branch.
112 ; CHECK: [[TST:%.*]]:_(s1) = G_LOAD [[ADDR]](p0)
113 ; CHECK: G_BRCOND [[TST]](s1), %[[TRUE]]
114 ; CHECK: G_BR %[[FALSE]]
116 ; Check that each successor contains the return instruction.
117 ; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}:
118 ; CHECK-NEXT: RET_ReallyLR
119 ; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
120 ; CHECK-NEXT: RET_ReallyLR
121 define void @condbr(i1* %tstaddr) {
122 %tst = load i1, i1* %tstaddr
123 br i1 %tst, label %true, label %false
130 ; Tests for indirect br.
131 ; CHECK-LABEL: name: indirectbr
134 ; ABI/constant lowering and IR-level entry basic block.
135 ; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}:
136 ; Make sure we have one successor
137 ; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+]](0x80000000)
139 ; Check basic block L1 has 2 successors: BBL1 and BBL2
140 ; CHECK: [[BB_L1]].{{[a-zA-Z0-9.]+}} (address-taken):
141 ; CHECK-NEXT: successors: %[[BB_L1]](0x40000000),
142 ; CHECK: %[[BB_L2:bb.[0-9]+]](0x40000000)
143 ; CHECK: G_BRINDIRECT %{{[0-9]+}}(p0)
145 ; Check basic block L2 is the return basic block
146 ; CHECK: [[BB_L2]].{{[a-zA-Z0-9.]+}} (address-taken):
147 ; CHECK-NEXT: RET_ReallyLR
149 @indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8
151 define void @indirectbr() {
154 L1: ; preds = %entry, %L1
155 %i = phi i32 [ 0, %entry ], [ %inc, %L1 ]
157 %idxprom = zext i32 %i to i64
158 %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @indirectbr.L, i64 0, i64 %idxprom
159 %brtarget = load i8*, i8** %arrayidx, align 8
160 indirectbr i8* %brtarget, [label %L1, label %L2]
166 ; CHECK-LABEL: name: ori64
167 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
168 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
169 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_OR [[ARG1]], [[ARG2]]
170 ; CHECK-NEXT: $x0 = COPY [[RES]]
171 ; CHECK-NEXT: RET_ReallyLR implicit $x0
172 define i64 @ori64(i64 %arg1, i64 %arg2) {
173 %res = or i64 %arg1, %arg2
177 ; CHECK-LABEL: name: ori32
178 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
179 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
180 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_OR [[ARG1]], [[ARG2]]
181 ; CHECK-NEXT: $w0 = COPY [[RES]]
182 ; CHECK-NEXT: RET_ReallyLR implicit $w0
183 define i32 @ori32(i32 %arg1, i32 %arg2) {
184 %res = or i32 %arg1, %arg2
189 ; CHECK-LABEL: name: xori64
190 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
191 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
192 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_XOR [[ARG1]], [[ARG2]]
193 ; CHECK-NEXT: $x0 = COPY [[RES]]
194 ; CHECK-NEXT: RET_ReallyLR implicit $x0
195 define i64 @xori64(i64 %arg1, i64 %arg2) {
196 %res = xor i64 %arg1, %arg2
200 ; CHECK-LABEL: name: xori32
201 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
202 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
203 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_XOR [[ARG1]], [[ARG2]]
204 ; CHECK-NEXT: $w0 = COPY [[RES]]
205 ; CHECK-NEXT: RET_ReallyLR implicit $w0
206 define i32 @xori32(i32 %arg1, i32 %arg2) {
207 %res = xor i32 %arg1, %arg2
212 ; CHECK-LABEL: name: andi64
213 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
214 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
215 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_AND [[ARG1]], [[ARG2]]
216 ; CHECK-NEXT: $x0 = COPY [[RES]]
217 ; CHECK-NEXT: RET_ReallyLR implicit $x0
218 define i64 @andi64(i64 %arg1, i64 %arg2) {
219 %res = and i64 %arg1, %arg2
223 ; CHECK-LABEL: name: andi32
224 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
225 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
226 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_AND [[ARG1]], [[ARG2]]
227 ; CHECK-NEXT: $w0 = COPY [[RES]]
228 ; CHECK-NEXT: RET_ReallyLR implicit $w0
229 define i32 @andi32(i32 %arg1, i32 %arg2) {
230 %res = and i32 %arg1, %arg2
235 ; CHECK-LABEL: name: subi64
236 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
237 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
238 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_SUB [[ARG1]], [[ARG2]]
239 ; CHECK-NEXT: $x0 = COPY [[RES]]
240 ; CHECK-NEXT: RET_ReallyLR implicit $x0
241 define i64 @subi64(i64 %arg1, i64 %arg2) {
242 %res = sub i64 %arg1, %arg2
246 ; CHECK-LABEL: name: subi32
247 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
248 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
249 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SUB [[ARG1]], [[ARG2]]
250 ; CHECK-NEXT: $w0 = COPY [[RES]]
251 ; CHECK-NEXT: RET_ReallyLR implicit $w0
252 define i32 @subi32(i32 %arg1, i32 %arg2) {
253 %res = sub i32 %arg1, %arg2
257 ; CHECK-LABEL: name: ptrtoint
258 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
259 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[ARG1]]
260 ; CHECK: $x0 = COPY [[RES]]
261 ; CHECK: RET_ReallyLR implicit $x0
262 define i64 @ptrtoint(i64* %a) {
263 %val = ptrtoint i64* %a to i64
267 ; CHECK-LABEL: name: inttoptr
268 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
269 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_INTTOPTR [[ARG1]]
270 ; CHECK: $x0 = COPY [[RES]]
271 ; CHECK: RET_ReallyLR implicit $x0
272 define i64* @inttoptr(i64 %a) {
273 %val = inttoptr i64 %a to i64*
277 ; CHECK-LABEL: name: trivial_bitcast
278 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
279 ; CHECK: $x0 = COPY [[ARG1]]
280 ; CHECK: RET_ReallyLR implicit $x0
281 define i64* @trivial_bitcast(i8* %a) {
282 %val = bitcast i8* %a to i64*
286 ; CHECK-LABEL: name: trivial_bitcast_with_copy
287 ; CHECK: [[A:%[0-9]+]]:_(p0) = COPY $x0
288 ; CHECK: G_BR %[[CAST:bb\.[0-9]+]]
290 ; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}:
291 ; CHECK: $x0 = COPY [[A]]
293 ; CHECK: [[CAST]].{{[a-zA-Z0-9.]+}}:
294 ; CHECK: G_BR %[[END]]
295 define i64* @trivial_bitcast_with_copy(i8* %a) {
302 %val = bitcast i8* %a to i64*
306 ; CHECK-LABEL: name: bitcast
307 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
308 ; CHECK: [[RES1:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[ARG1]]
309 ; CHECK: [[RES2:%[0-9]+]]:_(s64) = G_BITCAST [[RES1]]
310 ; CHECK: $x0 = COPY [[RES2]]
311 ; CHECK: RET_ReallyLR implicit $x0
312 define i64 @bitcast(i64 %a) {
313 %res1 = bitcast i64 %a to <2 x i32>
314 %res2 = bitcast <2 x i32> %res1 to i64
318 ; CHECK-LABEL: name: addrspacecast
319 ; CHECK: [[ARG1:%[0-9]+]]:_(p1) = COPY $x0
320 ; CHECK: [[RES1:%[0-9]+]]:_(p2) = G_ADDRSPACE_CAST [[ARG1]]
321 ; CHECK: [[RES2:%[0-9]+]]:_(p0) = G_ADDRSPACE_CAST [[RES1]]
322 ; CHECK: $x0 = COPY [[RES2]]
323 ; CHECK: RET_ReallyLR implicit $x0
324 define i64* @addrspacecast(i32 addrspace(1)* %a) {
325 %res1 = addrspacecast i32 addrspace(1)* %a to i64 addrspace(2)*
326 %res2 = addrspacecast i64 addrspace(2)* %res1 to i64*
330 ; CHECK-LABEL: name: trunc
331 ; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
332 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_LOAD
333 ; CHECK: [[RES1:%[0-9]+]]:_(s8) = G_TRUNC [[ARG1]]
334 ; CHECK: [[RES2:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[VEC]]
335 define void @trunc(i64 %a) {
336 %vecptr = alloca <4 x i32>
337 %vec = load <4 x i32>, <4 x i32>* %vecptr
338 %res1 = trunc i64 %a to i8
339 %res2 = trunc <4 x i32> %vec to <4 x i16>
343 ; CHECK-LABEL: name: load
344 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
345 ; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
346 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr, align 16)
347 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load (s64) from %ir.addr42, addrspace 42)
348 ; CHECK: [[SUM2:%.*]]:_(s64) = G_ADD [[VAL1]], [[VAL2]]
349 ; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load (s64) from %ir.addr)
350 ; CHECK: [[SUM3:%[0-9]+]]:_(s64) = G_ADD [[SUM2]], [[VAL3]]
351 ; CHECK: [[VAL4:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr, !range !0)
352 ; CHECK: [[SUM4:%[0-9]+]]:_(s64) = G_ADD [[SUM3]], [[VAL4]]
353 ; CHECK: $x0 = COPY [[SUM4]]
354 ; CHECK: RET_ReallyLR implicit $x0
355 define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
356 %val1 = load i64, i64* %addr, align 16
358 %val2 = load i64, i64 addrspace(42)* %addr42
359 %sum2 = add i64 %val1, %val2
361 %val3 = load volatile i64, i64* %addr
362 %sum3 = add i64 %sum2, %val3
364 %val4 = load i64, i64* %addr, !range !0
365 %sum4 = add i64 %sum3, %val4
369 ; CHECK-LABEL: name: store
370 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
371 ; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
372 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY $x2
373 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY $x3
374 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store (s64) into %ir.addr, align 16)
375 ; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store (s64) into %ir.addr42, addrspace 42)
376 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store (s64) into %ir.addr)
377 ; CHECK: RET_ReallyLR
378 define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
379 store i64 %val1, i64* %addr, align 16
380 store i64 %val2, i64 addrspace(42)* %addr42
381 store volatile i64 %val1, i64* %addr
382 %sum = add i64 %val1, %val2
386 ; CHECK-LABEL: name: intrinsics
387 ; CHECK: [[CUR:%[0-9]+]]:_(s32) = COPY $w0
388 ; CHECK: [[BITS:%[0-9]+]]:_(s32) = COPY $w1
389 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), 0
390 ; CHECK: [[PTR_VEC:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.ptr.vec
391 ; CHECK: [[VEC:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[PTR_VEC]]
392 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.st2), [[VEC]](<8 x s8>), [[VEC]](<8 x s8>), [[PTR]](p0)
393 ; CHECK: RET_ReallyLR
394 declare i8* @llvm.returnaddress(i32)
395 declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
396 declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
397 define void @intrinsics(i32 %cur, i32 %bits) {
398 %ptr = call i8* @llvm.returnaddress(i32 0)
399 %ptr.vec = alloca <8 x i8>
400 %vec = load <8 x i8>, <8 x i8>* %ptr.vec
401 call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec, <8 x i8> %vec, i8* %ptr)
405 ; CHECK-LABEL: name: test_phi
406 ; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+]]
407 ; CHECK: G_BR %[[FALSE:bb\.[0-9]+]]
409 ; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}:
410 ; CHECK: [[RES1:%[0-9]+]]:_(s32) = G_LOAD
412 ; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
413 ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_LOAD
415 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]]
416 ; CHECK: $w0 = COPY [[RES]]
417 define i32 @test_phi(i32* %addr1, i32* %addr2, i1 %tst) {
418 br i1 %tst, label %true, label %false
421 %res1 = load i32, i32* %addr1
425 %res2 = load i32, i32* %addr2
429 %res = phi i32 [%res1, %true], [%res2, %false]
433 ; CHECK-LABEL: name: unreachable
437 define void @unreachable(i32 %a) {
438 %sum = add i32 %a, %a
442 ; It's important that constants are after argument passing, but before the
443 ; rest of the entry block.
444 ; CHECK-LABEL: name: constant_int
445 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
446 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
448 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
449 ; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
450 ; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
451 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]]
452 ; CHECK: $w0 = COPY [[RES]]
454 define i32 @constant_int(i32 %in) {
458 %sum1 = add i32 %in, 1
459 %sum2 = add i32 %in, 1
460 %res = add i32 %sum1, %sum2
464 ; CHECK-LABEL: name: constant_int_start
465 ; CHECK: [[TWO:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
466 ; CHECK: [[ANSWER:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
467 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
468 define i32 @constant_int_start() {
473 ; CHECK-LABEL: name: test_undef
474 ; CHECK: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
475 ; CHECK: $w0 = COPY [[UNDEF]]
476 define i32 @test_undef() {
480 ; CHECK-LABEL: name: test_constant_inttoptr
481 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
482 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ONE]]
483 ; CHECK: $x0 = COPY [[PTR]]
484 define i8* @test_constant_inttoptr() {
485 ret i8* inttoptr(i64 1 to i8*)
488 ; This failed purely because the Constant -> VReg map was kept across
489 ; functions, so reuse the "i64 1" from above.
490 ; CHECK-LABEL: name: test_reused_constant
491 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
492 ; CHECK: $x0 = COPY [[ONE]]
493 define i64 @test_reused_constant() {
497 ; CHECK-LABEL: name: test_sext
498 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
499 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_SEXT [[IN]]
500 ; CHECK: $x0 = COPY [[RES]]
501 define i64 @test_sext(i32 %in) {
502 %res = sext i32 %in to i64
506 ; CHECK-LABEL: name: test_zext
507 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
508 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ZEXT [[IN]]
509 ; CHECK: $x0 = COPY [[RES]]
510 define i64 @test_zext(i32 %in) {
511 %res = zext i32 %in to i64
515 ; CHECK-LABEL: name: test_shl
516 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
517 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
518 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SHL [[ARG1]], [[ARG2]]
519 ; CHECK-NEXT: $w0 = COPY [[RES]]
520 ; CHECK-NEXT: RET_ReallyLR implicit $w0
521 define i32 @test_shl(i32 %arg1, i32 %arg2) {
522 %res = shl i32 %arg1, %arg2
527 ; CHECK-LABEL: name: test_lshr
528 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
529 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
530 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_LSHR [[ARG1]], [[ARG2]]
531 ; CHECK-NEXT: $w0 = COPY [[RES]]
532 ; CHECK-NEXT: RET_ReallyLR implicit $w0
533 define i32 @test_lshr(i32 %arg1, i32 %arg2) {
534 %res = lshr i32 %arg1, %arg2
538 ; CHECK-LABEL: name: test_ashr
539 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
540 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
541 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_ASHR [[ARG1]], [[ARG2]]
542 ; CHECK-NEXT: $w0 = COPY [[RES]]
543 ; CHECK-NEXT: RET_ReallyLR implicit $w0
544 define i32 @test_ashr(i32 %arg1, i32 %arg2) {
545 %res = ashr i32 %arg1, %arg2
549 ; CHECK-LABEL: name: test_sdiv
550 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
551 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
552 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SDIV [[ARG1]], [[ARG2]]
553 ; CHECK-NEXT: $w0 = COPY [[RES]]
554 ; CHECK-NEXT: RET_ReallyLR implicit $w0
555 define i32 @test_sdiv(i32 %arg1, i32 %arg2) {
556 %res = sdiv i32 %arg1, %arg2
560 ; CHECK-LABEL: name: test_udiv
561 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
562 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
563 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UDIV [[ARG1]], [[ARG2]]
564 ; CHECK-NEXT: $w0 = COPY [[RES]]
565 ; CHECK-NEXT: RET_ReallyLR implicit $w0
566 define i32 @test_udiv(i32 %arg1, i32 %arg2) {
567 %res = udiv i32 %arg1, %arg2
571 ; CHECK-LABEL: name: test_srem
572 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
573 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
574 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SREM [[ARG1]], [[ARG2]]
575 ; CHECK-NEXT: $w0 = COPY [[RES]]
576 ; CHECK-NEXT: RET_ReallyLR implicit $w0
577 define i32 @test_srem(i32 %arg1, i32 %arg2) {
578 %res = srem i32 %arg1, %arg2
582 ; CHECK-LABEL: name: test_urem
583 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
584 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
585 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UREM [[ARG1]], [[ARG2]]
586 ; CHECK-NEXT: $w0 = COPY [[RES]]
587 ; CHECK-NEXT: RET_ReallyLR implicit $w0
588 define i32 @test_urem(i32 %arg1, i32 %arg2) {
589 %res = urem i32 %arg1, %arg2
593 ; CHECK-LABEL: name: test_constant_null
594 ; CHECK: [[NULL:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
595 ; CHECK: $x0 = COPY [[NULL]]
596 define i8* @test_constant_null() {
600 ; CHECK-LABEL: name: test_struct_memops
601 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
602 ; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
603 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
604 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
605 ; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr + 4)
606 ; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store (s8) into %ir.addr, align 4)
607 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
608 ; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store (s32) into %ir.addr + 4)
609 define void @test_struct_memops({ i8, i32 }* %addr) {
610 %val = load { i8, i32 }, { i8, i32 }* %addr
611 store { i8, i32 } %val, { i8, i32 }* %addr
615 ; CHECK-LABEL: name: test_i1_memops
616 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
617 ; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load (s1) from %ir.addr)
618 ; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store (s1) into %ir.addr)
619 define void @test_i1_memops(i1* %addr) {
620 %val = load i1, i1* %addr
621 store i1 %val, i1* %addr
625 ; CHECK-LABEL: name: int_comparison
626 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
627 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
628 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
629 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LHS]](s32), [[RHS]]
630 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
631 define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
632 %res = icmp ne i32 %a, %b
633 store i1 %res, i1* %addr
637 ; CHECK-LABEL: name: ptr_comparison
638 ; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x0
639 ; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x1
640 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
641 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LHS]](p0), [[RHS]]
642 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
643 define void @ptr_comparison(i8* %a, i8* %b, i1* %addr) {
644 %res = icmp eq i8* %a, %b
645 store i1 %res, i1* %addr
649 ; CHECK-LABEL: name: test_fadd
650 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
651 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
652 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FADD [[ARG1]], [[ARG2]]
653 ; CHECK-NEXT: $s0 = COPY [[RES]]
654 ; CHECK-NEXT: RET_ReallyLR implicit $s0
655 define float @test_fadd(float %arg1, float %arg2) {
656 %res = fadd float %arg1, %arg2
660 ; CHECK-LABEL: name: test_fsub
661 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
662 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
663 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FSUB [[ARG1]], [[ARG2]]
664 ; CHECK-NEXT: $s0 = COPY [[RES]]
665 ; CHECK-NEXT: RET_ReallyLR implicit $s0
666 define float @test_fsub(float %arg1, float %arg2) {
667 %res = fsub float %arg1, %arg2
671 ; CHECK-LABEL: name: test_fmul
672 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
673 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
674 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FMUL [[ARG1]], [[ARG2]]
675 ; CHECK-NEXT: $s0 = COPY [[RES]]
676 ; CHECK-NEXT: RET_ReallyLR implicit $s0
677 define float @test_fmul(float %arg1, float %arg2) {
678 %res = fmul float %arg1, %arg2
682 ; CHECK-LABEL: name: test_fdiv
683 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
684 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
685 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FDIV [[ARG1]], [[ARG2]]
686 ; CHECK-NEXT: $s0 = COPY [[RES]]
687 ; CHECK-NEXT: RET_ReallyLR implicit $s0
688 define float @test_fdiv(float %arg1, float %arg2) {
689 %res = fdiv float %arg1, %arg2
693 ; CHECK-LABEL: name: test_frem
694 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
695 ; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
696 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FREM [[ARG1]], [[ARG2]]
697 ; CHECK-NEXT: $s0 = COPY [[RES]]
698 ; CHECK-NEXT: RET_ReallyLR implicit $s0
699 define float @test_frem(float %arg1, float %arg2) {
700 %res = frem float %arg1, %arg2
704 ; CHECK-LABEL: name: test_sadd_overflow
705 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
706 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
707 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
708 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
709 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
710 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
711 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
712 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
713 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
714 define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
715 %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
716 store { i32, i1 } %res, { i32, i1 }* %addr
720 ; CHECK-LABEL: name: test_uadd_overflow
721 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
722 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
723 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
724 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDO [[LHS]], [[RHS]]
725 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
726 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
727 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
728 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
729 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
730 define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
731 %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
732 store { i32, i1 } %res, { i32, i1 }* %addr
736 ; CHECK-LABEL: name: test_ssub_overflow
737 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
738 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
739 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
740 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
741 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr)
742 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
743 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
744 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
745 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
746 define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
747 %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %lhs, i32 %rhs)
748 store { i32, i1 } %res, { i32, i1 }* %subr
752 ; CHECK-LABEL: name: test_usub_overflow
753 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
754 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
755 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
756 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBO [[LHS]], [[RHS]]
757 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr)
758 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
759 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
760 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
761 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
762 define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
763 %res = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %lhs, i32 %rhs)
764 store { i32, i1 } %res, { i32, i1 }* %subr
768 ; CHECK-LABEL: name: test_smul_overflow
769 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
770 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
771 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
772 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
773 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
774 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
775 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
776 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
777 declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
778 define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
779 %res = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %lhs, i32 %rhs)
780 store { i32, i1 } %res, { i32, i1 }* %addr
784 ; CHECK-LABEL: name: test_umul_overflow
785 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
786 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
787 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
788 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
789 ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
790 ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
791 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
792 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
793 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
794 define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
795 %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
796 store { i32, i1 } %res, { i32, i1 }* %addr
800 ; CHECK-LABEL: name: test_extractvalue
801 ; CHECK: %0:_(p0) = COPY $x0
802 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
803 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
804 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
805 ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
806 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
807 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
808 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
809 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
810 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
811 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
812 ; CHECK: $w0 = COPY [[LD3]](s32)
813 %struct.nested = type {i8, { i8, i32 }, i32}
814 define i32 @test_extractvalue(%struct.nested* %addr) {
815 %struct = load %struct.nested, %struct.nested* %addr
816 %res = extractvalue %struct.nested %struct, 1, 1
820 ; CHECK-LABEL: name: test_extractvalue_agg
821 ; CHECK: %0:_(p0) = COPY $x0
822 ; CHECK: %1:_(p0) = COPY $x1
823 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
824 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
825 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
826 ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
827 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
828 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
829 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
830 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
831 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
832 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
833 ; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store (s8) into %ir.addr2, align 4)
834 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
835 ; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store (s32) into %ir.addr2 + 4)
836 define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
837 %struct = load %struct.nested, %struct.nested* %addr
838 %res = extractvalue %struct.nested %struct, 1
839 store {i8, i32} %res, {i8, i32}* %addr2
843 ; CHECK-LABEL: name: test_trivial_extract_ptr
844 ; CHECK: [[STRUCT:%[0-9]+]]:_(p0) = COPY $x0
845 ; CHECK: [[VAL32:%[0-9]+]]:_(s32) = COPY $w1
846 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_TRUNC [[VAL32]]
847 ; CHECK: G_STORE [[VAL]](s8), [[STRUCT]](p0)
848 define void @test_trivial_extract_ptr([1 x i8*] %s, i8 %val) {
849 %addr = extractvalue [1 x i8*] %s, 0
850 store i8 %val, i8* %addr
854 ; CHECK-LABEL: name: test_insertvalue
855 ; CHECK: %0:_(p0) = COPY $x0
856 ; CHECK: %1:_(s32) = COPY $w1
857 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
858 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
859 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
860 ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
861 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
862 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
863 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
864 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
865 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
866 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
867 ; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4)
868 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
869 ; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store (s8) into %ir.addr + 4, align 4)
870 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
871 ; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store (s32) into %ir.addr + 8)
872 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
873 ; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 12)
874 define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
875 %struct = load %struct.nested, %struct.nested* %addr
876 %newstruct = insertvalue %struct.nested %struct, i32 %val, 1, 1
877 store %struct.nested %newstruct, %struct.nested* %addr
881 define [1 x i64] @test_trivial_insert([1 x i64] %s, i64 %val) {
882 ; CHECK-LABEL: name: test_trivial_insert
883 ; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY $x0
884 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = COPY $x1
885 ; CHECK: $x0 = COPY [[VAL]]
886 %res = insertvalue [1 x i64] %s, i64 %val, 0
890 define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
891 ; CHECK-LABEL: name: test_trivial_insert_ptr
892 ; CHECK: [[STRUCT:%[0-9]+]]:_(p0) = COPY $x0
893 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY $x1
894 ; CHECK: $x0 = COPY [[VAL]]
895 %res = insertvalue [1 x i8*] %s, i8* %val, 0
899 ; CHECK-LABEL: name: test_insertvalue_agg
900 ; CHECK: %0:_(p0) = COPY $x0
901 ; CHECK: %1:_(p0) = COPY $x1
902 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load (s8) from %ir.addr2, align 4)
903 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
904 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
905 ; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr2 + 4)
906 ; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
907 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
908 ; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load (s8) from %ir.addr + 4, align 4)
909 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
910 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
911 ; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 8)
912 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
913 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
914 ; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.addr + 12)
915 ; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4)
916 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
917 ; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store (s8) into %ir.addr + 4, align 4)
918 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
919 ; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 8)
920 ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
921 ; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store (s32) into %ir.addr + 12)
922 define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
923 %smallstruct = load {i8, i32}, {i8, i32}* %addr2
924 %struct = load %struct.nested, %struct.nested* %addr
925 %res = insertvalue %struct.nested %struct, {i8, i32} %smallstruct, 1
926 store %struct.nested %res, %struct.nested* %addr
930 ; CHECK-LABEL: name: test_select
931 ; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
932 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
933 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w1
934 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w2
935 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
936 ; CHECK: $w0 = COPY [[RES]]
937 define i32 @test_select(i1 %tst, i32 %lhs, i32 %rhs) {
938 %res = select i1 %tst, i32 %lhs, i32 %rhs
942 ; CHECK-LABEL: name: test_select_flags
943 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
944 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
945 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
946 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s1
947 ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = nnan G_SELECT [[TRUNC]](s1), [[COPY1]], [[COPY2]]
948 define float @test_select_flags(i1 %tst, float %lhs, float %rhs) {
949 %res = select nnan i1 %tst, float %lhs, float %rhs
953 ; Don't take the flags from the compare condition
954 ; CHECK-LABEL: name: test_select_cmp_flags
955 ; CHECK: [[COPY0:%[0-9]+]]:_(s32) = COPY $s0
956 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s1
957 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s2
958 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s3
959 ; CHECK: [[CMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oeq), [[COPY0]](s32), [[COPY1]]
960 ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[CMP]](s1), [[COPY2]], [[COPY3]]
961 define float @test_select_cmp_flags(float %cmp0, float %cmp1, float %lhs, float %rhs) {
962 %tst = fcmp nsz oeq float %cmp0, %cmp1
963 %res = select i1 %tst, float %lhs, float %rhs
967 ; CHECK-LABEL: name: test_select_ptr
968 ; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
969 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
970 ; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x1
971 ; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x2
972 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
973 ; CHECK: $x0 = COPY [[RES]]
974 define i8* @test_select_ptr(i1 %tst, i8* %lhs, i8* %rhs) {
975 %res = select i1 %tst, i8* %lhs, i8* %rhs
979 ; CHECK-LABEL: name: test_select_vec
980 ; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
981 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
982 ; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q0
983 ; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
984 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
985 ; CHECK: $q0 = COPY [[RES]]
986 define <4 x i32> @test_select_vec(i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs) {
987 %res = select i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs
991 ; CHECK-LABEL: name: test_vselect_vec
992 ; CHECK: [[TST32:%[0-9]+]]:_(<4 x s32>) = COPY $q0
993 ; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
994 ; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q2
995 ; CHECK: [[TST:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[TST32]](<4 x s32>)
996 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](<4 x s1>), [[LHS]], [[RHS]]
997 ; CHECK: $q0 = COPY [[RES]]
998 define <4 x i32> @test_vselect_vec(<4 x i32> %tst32, <4 x i32> %lhs, <4 x i32> %rhs) {
999 %tst = trunc <4 x i32> %tst32 to <4 x i1>
1000 %res = select <4 x i1> %tst, <4 x i32> %lhs, <4 x i32> %rhs
1004 ; CHECK-LABEL: name: test_fptosi
1005 ; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
1006 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
1007 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOSI [[FP]](s32)
1008 ; CHECK: $x0 = COPY [[RES]]
1009 define i64 @test_fptosi(float* %fp.addr) {
1010 %fp = load float, float* %fp.addr
1011 %res = fptosi float %fp to i64
1015 ; CHECK-LABEL: name: test_fptoui
1016 ; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
1017 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
1018 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOUI [[FP]](s32)
1019 ; CHECK: $x0 = COPY [[RES]]
1020 define i64 @test_fptoui(float* %fp.addr) {
1021 %fp = load float, float* %fp.addr
1022 %res = fptoui float %fp to i64
1026 ; CHECK-LABEL: name: test_sitofp
1027 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1028 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
1029 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_SITOFP [[IN]](s32)
1030 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
1031 define void @test_sitofp(double* %addr, i32 %in) {
1032 %fp = sitofp i32 %in to double
1033 store double %fp, double* %addr
1037 ; CHECK-LABEL: name: test_uitofp
1038 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1039 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
1040 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_UITOFP [[IN]](s32)
1041 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
1042 define void @test_uitofp(double* %addr, i32 %in) {
1043 %fp = uitofp i32 %in to double
1044 store double %fp, double* %addr
1048 ; CHECK-LABEL: name: test_fpext
1049 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $s0
1050 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPEXT [[IN]](s32)
1051 ; CHECK: $d0 = COPY [[RES]]
1052 define double @test_fpext(float %in) {
1053 %res = fpext float %in to double
1057 ; CHECK-LABEL: name: test_fptrunc
1058 ; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY $d0
1059 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPTRUNC [[IN]](s64)
1060 ; CHECK: $s0 = COPY [[RES]]
1061 define float @test_fptrunc(double %in) {
1062 %res = fptrunc double %in to float
1066 ; CHECK-LABEL: name: test_constant_float
1067 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1068 ; CHECK: [[TMP:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+00
1069 ; CHECK: G_STORE [[TMP]](s32), [[ADDR]](p0)
1070 define void @test_constant_float(float* %addr) {
1071 store float 1.5, float* %addr
1075 ; CHECK-LABEL: name: float_comparison
1076 ; CHECK: [[LHSADDR:%[0-9]+]]:_(p0) = COPY $x0
1077 ; CHECK: [[RHSADDR:%[0-9]+]]:_(p0) = COPY $x1
1078 ; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY $x2
1079 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = G_LOAD [[LHSADDR]](p0)
1080 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
1081 ; CHECK: [[TST:%[0-9]+]]:_(s1) = nnan ninf nsz arcp contract afn reassoc G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
1082 ; CHECK: G_STORE [[TST]](s1), [[BOOLADDR]](p0)
1083 define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
1084 %a = load float, float* %a.addr
1085 %b = load float, float* %b.addr
1086 %res = fcmp nnan ninf nsz arcp contract afn reassoc oge float %a, %b
1087 store i1 %res, i1* %bool.addr
1091 ; CHECK-LABEL: name: trivial_float_comparison
1092 ; CHECK: [[ENTRY_R1:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
1093 ; CHECK: [[ENTRY_R2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
1094 ; CHECK: [[R1:%[0-9]+]]:_(s1) = COPY [[ENTRY_R1]](s1)
1095 ; CHECK: [[R2:%[0-9]+]]:_(s1) = COPY [[ENTRY_R2]](s1)
1096 ; CHECK: G_ADD [[R1]], [[R2]]
1097 define i1 @trivial_float_comparison(double %a, double %b) {
1098 %r1 = fcmp false double %a, %b
1099 %r2 = fcmp true double %a, %b
1100 %sum = add i1 %r1, %r2
1106 define i32* @test_global() {
1107 ; CHECK-LABEL: name: test_global
1108 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var{{$}}
1109 ; CHECK: $x0 = COPY [[TMP]](p0)
1114 @var1 = addrspace(42) global i32 0
1115 define i32 addrspace(42)* @test_global_addrspace() {
1116 ; CHECK-LABEL: name: test_global
1117 ; CHECK: [[TMP:%[0-9]+]]:_(p42) = G_GLOBAL_VALUE @var1{{$}}
1118 ; CHECK: $x0 = COPY [[TMP]](p42)
1120 ret i32 addrspace(42)* @var1
1124 define void()* @test_global_func() {
1125 ; CHECK-LABEL: name: test_global_func
1126 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @allocai64{{$}}
1127 ; CHECK: $x0 = COPY [[TMP]](p0)
1129 ret void()* @allocai64
1132 declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
1133 define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
1134 ; CHECK-LABEL: name: test_memcpy
1135 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1136 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
1137 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1138 ; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
1139 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
1143 define void @test_memcpy_tail(i8* %dst, i8* %src, i64 %size) {
1144 ; CHECK-LABEL: name: test_memcpy_tail
1145 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1146 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
1147 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1148 ; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
1149 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
1153 declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)*, i8 addrspace(1)*, i64, i1)
1154 define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1) * %src, i64 %size) {
1155 ; CHECK-LABEL: name: test_memcpy_nonzero_as
1156 ; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
1157 ; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
1158 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1159 ; CHECK: G_MEMCPY [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 1)
1160 call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
1164 declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
1165 define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
1166 ; CHECK-LABEL: name: test_memmove
1167 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1168 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
1169 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1170 ; CHECK: G_MEMMOVE [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
1171 call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
1175 declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
1176 define void @test_memset(i8* %dst, i8 %val, i64 %size) {
1177 ; CHECK-LABEL: name: test_memset
1178 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
1179 ; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
1180 ; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
1181 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
1182 ; CHECK: G_MEMSET [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst)
1183 call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
1187 define void @test_large_const(i128* %addr) {
1188 ; CHECK-LABEL: name: test_large_const
1189 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1190 ; CHECK: [[VAL:%[0-9]+]]:_(s128) = G_CONSTANT i128 42
1191 ; CHECK: G_STORE [[VAL]](s128), [[ADDR]](p0)
1192 store i128 42, i128* %addr
1196 ; When there was no formal argument handling (so the first BB was empty) we used
1197 ; to insert the constants at the end of the block, even if they were encountered
1198 ; after the block's terminators had been emitted. Also make sure the order is
1200 define i8* @test_const_placement() {
1201 ; CHECK-LABEL: name: test_const_placement
1202 ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
1203 ; CHECK: [[VAL_INT:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
1204 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = G_INTTOPTR [[VAL_INT]](s32)
1205 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
1209 ret i8* inttoptr(i32 42 to i8*)
1212 declare void @llvm.va_end(i8*)
1213 define void @test_va_end(i8* %list) {
1214 ; CHECK-LABEL: name: test_va_end
1216 ; CHECK-NOT: INTRINSIC
1217 ; CHECK: RET_ReallyLR
1218 call void @llvm.va_end(i8* %list)
1222 define void @test_va_arg(i8* %list) {
1223 ; CHECK-LABEL: test_va_arg
1224 ; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
1225 ; CHECK: G_VAARG [[LIST]](p0), 8
1226 ; CHECK: G_VAARG [[LIST]](p0), 1
1227 ; CHECK: G_VAARG [[LIST]](p0), 16
1229 %v0 = va_arg i8* %list, i64
1230 %v1 = va_arg i8* %list, i8
1231 %v2 = va_arg i8* %list, i128
1235 declare float @llvm.pow.f32(float, float)
1236 define float @test_pow_intrin(float %l, float %r) {
1237 ; CHECK-LABEL: name: test_pow_intrin
1238 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
1239 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $s1
1240 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FPOW [[LHS]], [[RHS]]
1241 ; CHECK: $s0 = COPY [[RES]]
1242 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.pow.f32(float %l, float %r)
1246 declare float @llvm.powi.f32.i32(float, i32)
1247 define float @test_powi_intrin(float %l, i32 %r) {
1248 ; CHECK-LABEL: name: test_powi_intrin
1249 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
1250 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w0
1251 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FPOWI [[LHS]], [[RHS]]
1252 ; CHECK: $s0 = COPY [[RES]]
1253 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.powi.f32.i32(float %l, i32 %r)
1257 declare float @llvm.fma.f32(float, float, float)
1258 define float @test_fma_intrin(float %a, float %b, float %c) {
1259 ; CHECK-LABEL: name: test_fma_intrin
1260 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1261 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
1262 ; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $s2
1263 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[A]], [[B]], [[C]]
1264 ; CHECK: $s0 = COPY [[RES]]
1265 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fma.f32(float %a, float %b, float %c)
1269 declare float @llvm.exp.f32(float)
1270 define float @test_exp_intrin(float %a) {
1271 ; CHECK-LABEL: name: test_exp_intrin
1272 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1273 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP [[A]]
1274 ; CHECK: $s0 = COPY [[RES]]
1275 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp.f32(float %a)
1279 declare float @llvm.exp2.f32(float)
1280 define float @test_exp2_intrin(float %a) {
1281 ; CHECK-LABEL: name: test_exp2_intrin
1282 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1283 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FEXP2 [[A]]
1284 ; CHECK: $s0 = COPY [[RES]]
1285 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.exp2.f32(float %a)
1289 declare float @llvm.log.f32(float)
1290 define float @test_log_intrin(float %a) {
1291 ; CHECK-LABEL: name: test_log_intrin
1292 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1293 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG [[A]]
1294 ; CHECK: $s0 = COPY [[RES]]
1295 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log.f32(float %a)
1299 declare float @llvm.log2.f32(float)
1300 define float @test_log2_intrin(float %a) {
1301 ; CHECK-LABEL: name: test_log2_intrin
1302 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1303 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG2 [[A]]
1304 ; CHECK: $s0 = COPY [[RES]]
1305 %res = call float @llvm.log2.f32(float %a)
1309 declare float @llvm.log10.f32(float)
1310 define float @test_log10_intrin(float %a) {
1311 ; CHECK-LABEL: name: test_log10_intrin
1312 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1313 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FLOG10 [[A]]
1314 ; CHECK: $s0 = COPY [[RES]]
1315 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.log10.f32(float %a)
1319 declare float @llvm.fabs.f32(float)
1320 define float @test_fabs_intrin(float %a) {
1321 ; CHECK-LABEL: name: test_fabs_intrin
1322 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1323 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FABS [[A]]
1324 ; CHECK: $s0 = COPY [[RES]]
1325 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.fabs.f32(float %a)
1329 declare float @llvm.copysign.f32(float, float)
1330 define float @test_fcopysign_intrin(float %a, float %b) {
1331 ; CHECK-LABEL: name: test_fcopysign_intrin
1332 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1333 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
1334 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FCOPYSIGN [[A]], [[B]]
1335 ; CHECK: $s0 = COPY [[RES]]
1337 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.copysign.f32(float %a, float %b)
1341 declare float @llvm.canonicalize.f32(float)
1342 define float @test_fcanonicalize_intrin(float %a) {
1343 ; CHECK-LABEL: name: test_fcanonicalize_intrin
1344 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1345 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FCANONICALIZE [[A]]
1346 ; CHECK: $s0 = COPY [[RES]]
1347 %res = call nnan ninf nsz arcp contract afn reassoc float @llvm.canonicalize.f32(float %a)
1351 declare float @llvm.trunc.f32(float)
1352 define float @test_intrinsic_trunc(float %a) {
1353 ; CHECK-LABEL: name: test_intrinsic_trunc
1354 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1355 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[A]]
1356 ; CHECK: $s0 = COPY [[RES]]
1357 %res = call float @llvm.trunc.f32(float %a)
1361 declare float @llvm.round.f32(float)
1362 define float @test_intrinsic_round(float %a) {
1363 ; CHECK-LABEL: name: test_intrinsic_round
1364 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1365 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_INTRINSIC_ROUND [[A]]
1366 ; CHECK: $s0 = COPY [[RES]]
1367 %res = call float @llvm.round.f32(float %a)
1371 declare i32 @llvm.lrint.i32.f32(float)
1372 define i32 @test_intrinsic_lrint(float %a) {
1373 ; CHECK-LABEL: name: test_intrinsic_lrint
1374 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
1375 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_INTRINSIC_LRINT [[A]]
1376 ; CHECK: $w0 = COPY [[RES]]
1377 %res = call i32 @llvm.lrint.i32.f32(float %a)
1381 declare i32 @llvm.ctlz.i32(i32, i1)
1382 define i32 @test_ctlz_intrinsic_zero_not_undef(i32 %a) {
1383 ; CHECK-LABEL: name: test_ctlz_intrinsic_zero_not_undef
1384 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1385 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CTLZ [[A]]
1386 ; CHECK: $w0 = COPY [[RES]]
1387 %res = call i32 @llvm.ctlz.i32(i32 %a, i1 0)
1391 declare i32 @llvm.cttz.i32(i32, i1)
1392 define i32 @test_cttz_intrinsic_zero_undef(i32 %a) {
1393 ; CHECK-LABEL: name: test_cttz_intrinsic_zero_undef
1394 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1395 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[A]]
1396 ; CHECK: $w0 = COPY [[RES]]
1397 %res = call i32 @llvm.cttz.i32(i32 %a, i1 1)
1401 declare i32 @llvm.ctpop.i32(i32)
1402 define i32 @test_ctpop_intrinsic(i32 %a) {
1403 ; CHECK-LABEL: name: test_ctpop
1404 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1405 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_CTPOP [[A]]
1406 ; CHECK: $w0 = COPY [[RES]]
1407 %res = call i32 @llvm.ctpop.i32(i32 %a)
1411 declare i32 @llvm.bitreverse.i32(i32)
1412 define i32 @test_bitreverse_intrinsic(i32 %a) {
1413 ; CHECK-LABEL: name: test_bitreverse
1414 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1415 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_BITREVERSE [[A]]
1416 ; CHECK: $w0 = COPY [[RES]]
1417 %res = call i32 @llvm.bitreverse.i32(i32 %a)
1421 declare i32 @llvm.fshl.i32(i32, i32, i32)
1422 define i32 @test_fshl_intrinsic(i32 %a, i32 %b, i32 %c) {
1423 ; CHECK-LABEL: name: test_fshl_intrinsic
1424 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1425 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $w1
1426 ; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $w2
1427 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FSHL [[A]], [[B]], [[C]]
1428 ; CHECK: $w0 = COPY [[RES]]
1429 %res = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
1433 declare i32 @llvm.fshr.i32(i32, i32, i32)
1434 define i32 @test_fshr_intrinsic(i32 %a, i32 %b, i32 %c) {
1435 ; CHECK-LABEL: name: test_fshr_intrinsic
1436 ; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $w0
1437 ; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $w1
1438 ; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $w2
1439 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FSHR [[A]], [[B]], [[C]]
1440 ; CHECK: $w0 = COPY [[RES]]
1441 %res = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
1445 declare void @llvm.lifetime.start.p0i8(i64, i8*)
1446 declare void @llvm.lifetime.end.p0i8(i64, i8*)
1447 define void @test_lifetime_intrin() {
1448 ; CHECK-LABEL: name: test_lifetime_intrin
1449 ; CHECK: RET_ReallyLR
1450 ; O3-LABEL: name: test_lifetime_intrin
1451 ; O3: {{%[0-9]+}}:_(p0) = G_FRAME_INDEX %stack.0.slot
1452 ; O3-NEXT: LIFETIME_START %stack.0.slot
1453 ; O3-NEXT: LIFETIME_END %stack.0.slot
1454 ; O3-NEXT: RET_ReallyLR
1455 %slot = alloca i8, i32 4
1456 call void @llvm.lifetime.start.p0i8(i64 0, i8* %slot)
1457 call void @llvm.lifetime.end.p0i8(i64 0, i8* %slot)
1461 define void @test_load_store_atomics(i8* %addr) {
1462 ; CHECK-LABEL: name: test_load_store_atomics
1463 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1464 ; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered (s8) from %ir.addr)
1465 ; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic (s8) into %ir.addr)
1466 ; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire (s8) from %ir.addr)
1467 ; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release (s8) into %ir.addr)
1468 ; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst (s8) from %ir.addr)
1469 ; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic (s8) into %ir.addr)
1470 %v0 = load atomic i8, i8* %addr unordered, align 1
1471 store atomic i8 %v0, i8* %addr monotonic, align 1
1473 %v1 = load atomic i8, i8* %addr acquire, align 1
1474 store atomic i8 %v1, i8* %addr release, align 1
1476 %v2 = load atomic i8, i8* %addr syncscope("singlethread") seq_cst, align 1
1477 store atomic i8 %v2, i8* %addr syncscope("singlethread") monotonic, align 1
1482 define float @test_fneg_f32(float %x) {
1483 ; CHECK-LABEL: name: test_fneg_f32
1484 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $s0
1485 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FNEG [[ARG]]
1486 ; CHECK: $s0 = COPY [[RES]](s32)
1487 %neg = fneg float %x
1491 define float @test_fneg_f32_fmf(float %x) {
1492 ; CHECK-LABEL: name: test_fneg_f32
1493 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $s0
1494 ; CHECK: [[RES:%[0-9]+]]:_(s32) = nnan ninf nsz arcp contract afn reassoc G_FNEG [[ARG]]
1495 ; CHECK: $s0 = COPY [[RES]](s32)
1496 %neg = fneg fast float %x
1500 define double @test_fneg_f64(double %x) {
1501 ; CHECK-LABEL: name: test_fneg_f64
1502 ; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY $d0
1503 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FNEG [[ARG]]
1504 ; CHECK: $d0 = COPY [[RES]](s64)
1505 %neg = fneg double %x
1509 define double @test_fneg_f64_fmf(double %x) {
1510 ; CHECK-LABEL: name: test_fneg_f64_fmf
1511 ; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY $d0
1512 ; CHECK: [[RES:%[0-9]+]]:_(s64) = nnan ninf nsz arcp contract afn reassoc G_FNEG [[ARG]]
1513 ; CHECK: $d0 = COPY [[RES]](s64)
1514 %neg = fneg fast double %x
1518 define void @test_trivial_inlineasm() {
1519 ; CHECK-LABEL: name: test_trivial_inlineasm
1520 ; CHECK: INLINEASM &wibble, 1
1521 ; CHECK: INLINEASM &wibble, 0
1522 call void asm sideeffect "wibble", ""()
1523 call void asm "wibble", ""()
1527 define <2 x i32> @test_insertelement(<2 x i32> %vec, i32 %elt, i32 %idx){
1528 ; CHECK-LABEL: name: test_insertelement
1529 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1530 ; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
1531 ; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w1
1532 ; CHECK: [[RES:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[VEC]], [[ELT]](s32), [[IDX]](s32)
1533 ; CHECK: $d0 = COPY [[RES]](<2 x s32>)
1534 %res = insertelement <2 x i32> %vec, i32 %elt, i32 %idx
1538 define i32 @test_extractelement(<2 x i32> %vec, i32 %idx) {
1539 ; CHECK-LABEL: name: test_extractelement
1540 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1541 ; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w0
1542 ; CHECK: [[IDXEXT:%[0-9]+]]:_(s64) = G_SEXT [[IDX]]
1543 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDXEXT]](s64)
1544 ; CHECK: $w0 = COPY [[RES]](s32)
1545 %res = extractelement <2 x i32> %vec, i32 %idx
1549 define i32 @test_extractelement_const_idx(<2 x i32> %vec) {
1550 ; CHECK-LABEL: name: test_extractelement_const_idx
1551 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1552 ; CHECK: [[IDX:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
1553 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDX]](s64)
1554 ; CHECK: $w0 = COPY [[RES]](s32)
1555 %res = extractelement <2 x i32> %vec, i32 1
1559 define i32 @test_singleelementvector(i32 %elt){
1560 ; CHECK-LABEL: name: test_singleelementvector
1561 ; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
1562 ; CHECK-NOT: G_INSERT_VECTOR_ELT
1563 ; CHECK-NOT: G_EXTRACT_VECTOR_ELT
1564 ; CHECK: $w0 = COPY [[ELT]](s32)
1565 %vec = insertelement <1 x i32> undef, i32 %elt, i32 0
1566 %res = extractelement <1 x i32> %vec, i32 0
1570 define <2 x i32> @test_constantaggzerovector_v2i32() {
1571 ; CHECK-LABEL: name: test_constantaggzerovector_v2i32
1572 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1573 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32)
1574 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1575 ret <2 x i32> zeroinitializer
1578 define <2 x float> @test_constantaggzerovector_v2f32() {
1579 ; CHECK-LABEL: name: test_constantaggzerovector_v2f32
1580 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
1581 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32)
1582 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1583 ret <2 x float> zeroinitializer
1586 define i32 @test_constantaggzerovector_v3i32() {
1587 ; CHECK-LABEL: name: test_constantaggzerovector_v3i32
1588 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1589 ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32), [[ZERO]](s32)
1590 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
1591 %elt = extractelement <3 x i32> zeroinitializer, i32 1
1595 define <2 x i32> @test_constantdatavector_v2i32() {
1596 ; CHECK-LABEL: name: test_constantdatavector_v2i32
1597 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1598 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1599 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32)
1600 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1601 ret <2 x i32> <i32 1, i32 2>
1604 define i32 @test_constantdatavector_v3i32() {
1605 ; CHECK-LABEL: name: test_constantdatavector_v3i32
1606 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1607 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1608 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
1609 ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C3]](s32)
1610 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
1611 %elt = extractelement <3 x i32> <i32 1, i32 2, i32 3>, i32 1
1615 define <4 x i32> @test_constantdatavector_v4i32() {
1616 ; CHECK-LABEL: name: test_constantdatavector_v4i32
1617 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1618 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1619 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
1620 ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1621 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
1622 ; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
1623 ret <4 x i32> <i32 1, i32 2, i32 3, i32 4>
1626 define <2 x double> @test_constantdatavector_v2f64() {
1627 ; CHECK-LABEL: name: test_constantdatavector_v2f64
1628 ; CHECK: [[FC1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
1629 ; CHECK: [[FC2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1630 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FC1]](s64), [[FC2]](s64)
1631 ; CHECK: $q0 = COPY [[VEC]](<2 x s64>)
1632 ret <2 x double> <double 1.0, double 2.0>
1635 define i32 @test_constantaggzerovector_v1s32(i32 %arg){
1636 ; CHECK-LABEL: name: test_constantaggzerovector_v1s32
1637 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
1638 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1639 ; CHECK-NOT: G_MERGE_VALUES
1640 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C0]]
1641 ; CHECK-NOT: G_MERGE_VALUES
1642 ; CHECK: G_ADD [[ARG]], [[COPY]]
1643 %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
1644 %add = add <1 x i32> %vec, zeroinitializer
1645 %res = extractelement <1 x i32> %add, i32 0
1649 define i32 @test_constantdatavector_v1s32(i32 %arg){
1650 ; CHECK-LABEL: name: test_constantdatavector_v1s32
1651 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
1652 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1653 ; CHECK-NOT: G_MERGE_VALUES
1654 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]]
1655 ; CHECK-NOT: G_MERGE_VALUES
1656 ; CHECK: G_ADD [[ARG]], [[COPY]]
1657 %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
1658 %add = add <1 x i32> %vec, <i32 1>
1659 %res = extractelement <1 x i32> %add, i32 0
1663 declare ghccc float @different_call_conv_target(float %x)
1664 define float @test_different_call_conv_target(float %x) {
1665 ; CHECK-LABEL: name: test_different_call_conv_target
1666 ; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $s0
1667 ; CHECK: $s8 = COPY [[X]]
1668 ; CHECK: BL @different_call_conv_target, csr_aarch64_noregs, implicit-def $lr, implicit $sp, implicit $s8, implicit-def $s0
1669 %res = call ghccc float @different_call_conv_target(float %x)
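; shufflevector is translated to G_SHUFFLE_VECTOR with the mask printed inline as
; shufflemask(...); lanes that are undef in the IR mask stay undef in the printed mask.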
1673 define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
1674 ; CHECK-LABEL: name: test_shufflevector_s32_v2s32
1675 ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
1676 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
1677 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], shufflemask(0, 0)
1678 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1679 %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
1680 %res = shufflevector <1 x i32> %vec, <1 x i32> undef, <2 x i32> zeroinitializer
1684 define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
1685 ; CHECK-LABEL: name: test_shufflevector_v2s32_s32
1686 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1687 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF:%[0-9]+]], shufflemask(1)
1688 ; CHECK: $w0 = COPY [[RES]](s32)
1689 %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <1 x i32> <i32 1>
1690 %res = extractelement <1 x i32> %vec, i32 0
1694 define <2 x i32> @test_shufflevector_v2s32_v2s32_undef(<2 x i32> %arg) {
1695 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32_undef
1696 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1697 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1698 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(undef, undef)
1699 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1700 %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> undef
1704 define <2 x i32> @test_shufflevector_v2s32_v2s32_undef_0(<2 x i32> %arg) {
1705 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32_undef_0
1706 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1707 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1708 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(undef, 0)
1709 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1710 %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 undef, i32 0>
1714 define <2 x i32> @test_shufflevector_v2s32_v2s32_0_undef(<2 x i32> %arg) {
1715 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32_0_undef
1716 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1717 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1718 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(0, undef)
1719 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1720 %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 0, i32 undef>
1724 define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
1725 ; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
1726 ; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1727 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
1728 ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], shufflemask(1, 0, 1)
1729 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>)
1730 %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
1731 %res = extractelement <3 x i32> %vec, i32 0
1735 define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg2) {
1736 ; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
1737 ; CHECK: [[ARG1:%[0-9]+]]:_(<2 x s32>) = COPY $d0
1738 ; CHECK: [[ARG2:%[0-9]+]]:_(<2 x s32>) = COPY $d1
1739 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[ARG1]](<2 x s32>), [[ARG2]], shufflemask(0, 1, 2, 3)
1740 ; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
1741 %res = shufflevector <2 x i32> %arg1, <2 x i32> %arg2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
1745 define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
1746 ; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
1747 ; CHECK: [[ARG:%[0-9]+]]:_(<4 x s32>) = COPY $q0
1748 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
1749 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<4 x s32>), [[UNDEF]], shufflemask(1, 3)
1750 ; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
1751 %res = shufflevector <4 x i32> %arg, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
1756 define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2) {
1757 ; CHECK-LABEL: name: test_shufflevector_v8s8_v16s8
1758 ; CHECK: [[ARG1:%[0-9]+]]:_(<8 x s8>) = COPY $d0
1759 ; CHECK: [[ARG2:%[0-9]+]]:_(<8 x s8>) = COPY $d1
1760 ; CHECK: [[VEC:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[ARG1]](<8 x s8>), [[ARG2]], shufflemask(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15)
1761 ; CHECK: $q0 = COPY [[VEC]](<16 x s8>)
1762 %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
1766 ; CHECK-LABEL: name: test_constant_vector
1767 ; CHECK: [[UNDEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
1768 ; CHECK: [[F:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
1769 ; CHECK: [[M:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
1770 ; CHECK: $d0 = COPY [[M]](<4 x s16>)
1771 define <4 x half> @test_constant_vector() {
1772 ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
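; Target intrinsics that access memory (here llvm.aarch64.ldxr) should still carry a
; memory operand on the G_INTRINSIC_W_SIDE_EFFECTS, as the check line below shows.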
1775 define i32 @test_target_mem_intrinsic(i32* %addr) {
1776 ; CHECK-LABEL: name: test_target_mem_intrinsic
1777 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1778 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load (s32) from %ir.addr)
1779 ; CHECK: G_TRUNC [[VAL]](s64)
1780 %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
1781 %trunc = trunc i64 %val to i32
1785 declare i64 @llvm.aarch64.ldxr.p0i32(i32*) nounwind
1787 %zerosize_type = type {}
1789 define %zerosize_type @test_empty_load_store(%zerosize_type *%ptr, %zerosize_type %in) noinline optnone {
1790 ; CHECK-LABEL: name: test_empty_load_store
1791 ; CHECK-NOT: G_STORE
1793 ; CHECK: RET_ReallyLR
1795 store %zerosize_type undef, %zerosize_type* undef, align 4
1796 %val = load %zerosize_type, %zerosize_type* %ptr, align 4
1797 ret %zerosize_type %in
1801 define i64 @test_phi_loop(i32 %n) {
1802 ; CHECK-LABEL: name: test_phi_loop
1803 ; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
1804 ; CHECK: [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1805 ; CHECK: [[CST2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1806 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
1807 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
1809 ; CHECK: [[PN1:%[0-9]+]]:_(s32) = G_PHI [[ARG1]](s32), %bb.1, [[SUB:%[0-9]+]](s32), %bb.2
1810 ; CHECK: [[PN2:%[0-9]+]]:_(s64) = G_PHI [[CST3]](s64), %bb.1, [[PN3:%[0-9]+]](s64), %bb.2
1811 ; CHECK: [[PN3]]:_(s64) = G_PHI [[CST4]](s64), %bb.1, [[ADD:%[0-9]+]](s64), %bb.2
1812 ; CHECK: [[ADD]]:_(s64) = G_ADD [[PN2]], [[PN3]]
1813 ; CHECK: [[SUB]]:_(s32) = G_SUB [[PN1]], [[CST1]]
1814 ; CHECK: [[CMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PN1]](s32), [[CST2]]
1815 ; CHECK: G_BRCOND [[CMP]](s1), %bb.3
1818 ; CHECK: $x0 = COPY [[PN2]](s64)
1819 ; CHECK: RET_ReallyLR implicit $x0
1824 %counter = phi i32 [ %n, %entry ], [ %counter.dec, %loop ]
1825 %elem = phi { i64, i64 } [ { i64 0, i64 1 }, %entry ], [ %updated, %loop ]
1826 %prev = extractvalue { i64, i64 } %elem, 0
1827 %curr = extractvalue { i64, i64 } %elem, 1
1828 %next = add i64 %prev, %curr
1829 %shifted = insertvalue { i64, i64 } %elem, i64 %curr, 0
1830 %updated = insertvalue { i64, i64 } %shifted, i64 %next, 1
1831 %counter.dec = sub i32 %counter, 1
1832 %cond = icmp sle i32 %counter, 0
1833 br i1 %cond, label %exit, label %loop
1836 %res = extractvalue { i64, i64 } %elem, 0
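; A PHI of aggregate type is split into one G_PHI per scalar member: the
; { i8, i16, i32 } value below becomes three G_PHIs (s8, s16 and s32).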
1840 define void @test_phi_diamond({ i8, i16, i32 }* %a.ptr, { i8, i16, i32 }* %b.ptr, i1 %selector, { i8, i16, i32 }* %dst) {
1841 ; CHECK-LABEL: name: test_phi_diamond
1842 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
1843 ; CHECK: [[ARG2:%[0-9]+]]:_(p0) = COPY $x1
1844 ; CHECK: [[ARG3:%[0-9]+]]:_(s32) = COPY $w2
1845 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ARG3]](s32)
1846 ; CHECK: [[ARG4:%[0-9]+]]:_(p0) = COPY $x3
1847 ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.2
1850 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load (s8) from %ir.a.ptr, align 4)
1851 ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
1852 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST1]](s64)
1853 ; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load (s16) from %ir.a.ptr + 2)
1854 ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1855 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST2]](s64)
1856 ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.a.ptr + 4)
1859 ; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load (s8) from %ir.b.ptr, align 4)
1860 ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
1861 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST3]](s64)
1862 ; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load (s16) from %ir.b.ptr + 2)
1863 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1864 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST4]](s64)
1865 ; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.b.ptr + 4)
1867 ; CHECK: [[PN1:%[0-9]+]]:_(s8) = G_PHI [[LD1]](s8), %bb.2, [[LD4]](s8), %bb.3
1868 ; CHECK: [[PN2:%[0-9]+]]:_(s16) = G_PHI [[LD2]](s16), %bb.2, [[LD5]](s16), %bb.3
1869 ; CHECK: [[PN3:%[0-9]+]]:_(s32) = G_PHI [[LD3]](s32), %bb.2, [[LD6]](s32), %bb.3
1870 ; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store (s8) into %ir.dst, align 4)
1871 ; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
1872 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST5]](s64)
1873 ; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store (s16) into %ir.dst + 2)
1874 ; CHECK: [[CST6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1875 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST6]](s64)
1876 ; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store (s32) into %ir.dst + 4)
1877 ; CHECK: RET_ReallyLR
1880 br i1 %selector, label %store.a, label %store.b
1883 %a = load { i8, i16, i32 }, { i8, i16, i32 }* %a.ptr
1887 %b = load { i8, i16, i32 }, { i8, i16, i32 }* %b.ptr
1891 %v = phi { i8, i16, i32 } [ %a, %store.a ], [ %b, %store.b ]
1892 store { i8, i16, i32 } %v, { i8, i16, i32 }* %dst
1896 %agg.inner.inner = type {i64, i64}
1897 %agg.inner = type {i16, i8, %agg.inner.inner }
1898 %agg.nested = type {i32, i32, %agg.inner, i32}
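; Storing a constant nested aggregate is split into one G_STORE per leaf field, with
; pointer offsets that follow the in-memory layout of %agg.nested.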
1900 define void @test_nested_aggregate_const(%agg.nested *%ptr) {
1901 ; CHECK-LABEL: name: test_nested_aggregate_const
1902 ; CHECK: [[BASE:%[0-9]+]]:_(p0) = COPY $x0
1903 ; CHECK: [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1904 ; CHECK: [[CST2:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
1905 ; CHECK: [[CST3:%[0-9]+]]:_(s8) = G_CONSTANT i8 3
1906 ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
1907 ; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1908 ; CHECK: [[CST6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
1909 ; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store (s32) into %ir.ptr, align 8)
1910 ; CHECK: [[CST7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1911 ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST7]](s64)
1912 ; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store (s32) into %ir.ptr + 4)
1913 ; CHECK: [[CST8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1914 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST8]](s64)
1915 ; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store (s16) into %ir.ptr + 8, align 8)
1916 ; CHECK: [[CST9:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
1917 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST9]](s64)
1918 ; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store (s8) into %ir.ptr + 10, align 2)
1919 ; CHECK: [[CST10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
1920 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST10]](s64)
1921 ; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store (s64) into %ir.ptr + 16)
1922 ; CHECK: [[CST11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
1923 ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST11]](s64)
1924 ; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store (s64) into %ir.ptr + 24)
1925 ; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
1926 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST12]](s64)
1927 ; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store (s32) into %ir.ptr + 32, align 8)
1928 store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, %agg.nested *%ptr
1932 define i1 @return_i1_zext() {
1933 ; AAPCS ABI says that booleans can only be 1 or 0, so we need to zero-extend.
1934 ; CHECK-LABEL: name: return_i1_zext
1935 ; CHECK: [[CST:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
1936 ; CHECK: [[ZEXT:%[0-9]+]]:_(s8) = G_ZEXT [[CST]](s1)
1937 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s8)
1938 ; CHECK: $w0 = COPY [[ANYEXT]](s32)
1939 ; CHECK: RET_ReallyLR implicit $w0
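; cmpxchg is translated to G_ATOMIC_CMPXCHG_WITH_SUCCESS, which defines both the loaded
; value and an s1 success flag; the two extractvalues below map onto those two results.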
1944 define i32 @test_atomic_cmpxchg_1(i32* %addr) {
1945 ; CHECK-LABEL: name: test_atomic_cmpxchg_1
1946 ; CHECK: bb.1.entry:
1947 ; CHECK-NEXT: successors: %bb.{{[^)]+}}
1948 ; CHECK-NEXT: liveins: $x0
1949 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1950 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1951 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1952 ; CHECK: bb.2.repeat:
1953 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
1954 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic (s32) on %ir.addr)
1955 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
1956 ; CHECK-NEXT: G_BR %bb.2
1961 %val_success = cmpxchg i32* %addr, i32 0, i32 1 monotonic monotonic
1962 %value_loaded = extractvalue { i32, i1 } %val_success, 0
1963 %success = extractvalue { i32, i1 } %val_success, 1
1964 br i1 %success, label %done, label %repeat
1966 ret i32 %value_loaded
1970 define i32 @test_weak_atomic_cmpxchg_1(i32* %addr) {
1971 ; CHECK-LABEL: name: test_weak_atomic_cmpxchg_1
1972 ; CHECK: bb.1.entry:
1973 ; CHECK-NEXT: successors: %bb.{{[^)]+}}
1974 ; CHECK-NEXT: liveins: $x0
1975 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
1976 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
1977 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
1978 ; CHECK: bb.2.repeat:
1979 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
1980 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic (s32) on %ir.addr)
1981 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
1982 ; CHECK-NEXT: G_BR %bb.2
1987 %val_success = cmpxchg weak i32* %addr, i32 0, i32 1 monotonic monotonic
1988 %value_loaded = extractvalue { i32, i1 } %val_success, 0
1989 %success = extractvalue { i32, i1 } %val_success, 1
1990 br i1 %success, label %done, label %repeat
1992 ret i32 %value_loaded
1995 ; Try one cmpxchg with a small type and high atomic ordering.
1996 define i16 @test_atomic_cmpxchg_2(i16* %addr) {
1997 ; CHECK-LABEL: name: test_atomic_cmpxchg_2
1998 ; CHECK: bb.1.entry:
1999 ; CHECK-NEXT: successors: %bb.2({{[^)]+}})
2000 ; CHECK-NEXT: liveins: $x0
2001 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2002 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
2003 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
2004 ; CHECK: bb.2.repeat:
2005 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
2006 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s16), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst seq_cst (s16) on %ir.addr)
2007 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
2008 ; CHECK-NEXT: G_BR %bb.2
2013 %val_success = cmpxchg i16* %addr, i16 0, i16 1 seq_cst seq_cst
2014 %value_loaded = extractvalue { i16, i1 } %val_success, 0
2015 %success = extractvalue { i16, i1 } %val_success, 1
2016 br i1 %success, label %done, label %repeat
2018 ret i16 %value_loaded
2021 ; Try one cmpxchg where the success order and failure order differ.
2022 define i64 @test_atomic_cmpxchg_3(i64* %addr) {
2023 ; CHECK-LABEL: name: test_atomic_cmpxchg_3
2024 ; CHECK: bb.1.entry:
2025 ; CHECK-NEXT: successors: %bb.2({{[^)]+}})
2026 ; CHECK-NEXT: liveins: $x0
2027 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2028 ; CHECK-NEXT: [[OLDVAL:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
2029 ; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
2030 ; CHECK: bb.2.repeat:
2031 ; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
2032 ; CHECK: [[OLDVALRES:%[0-9]+]]:_(s64), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst acquire (s64) on %ir.addr)
2033 ; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
2034 ; CHECK-NEXT: G_BR %bb.2
2039 %val_success = cmpxchg i64* %addr, i64 0, i64 1 seq_cst acquire
2040 %value_loaded = extractvalue { i64, i1 } %val_success, 0
2041 %success = extractvalue { i64, i1 } %val_success, 1
2042 br i1 %success, label %done, label %repeat
2044 ret i64 %value_loaded
2047 ; Try a monotonic atomicrmw xchg
2048 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
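; For reference only (not checked by this test): a narrower operation such as
;   %old = atomicrmw xchg i32* %addr, i32 1 monotonic
; is one of the cases that may be rewritten at the IR level before this pass runs, so it
; would not necessarily reach the IRTranslator as a single G_ATOMICRMW_XCHG.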
2049 define i32 @test_atomicrmw_xchg(i256* %addr) {
2050 ; CHECK-LABEL: name: test_atomicrmw_xchg
2051 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2052 ; CHECK-NEXT: liveins: $x0
2053 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2054 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2055 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XCHG [[ADDR]](p0), [[VAL]] :: (load store monotonic (s256) on %ir.addr)
2056 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2057 %oldval = atomicrmw xchg i256* %addr, i256 1 monotonic
2058 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2059 ; test so work around it by truncating to i32 for now.
2060 %oldval.trunc = trunc i256 %oldval to i32
2061 ret i32 %oldval.trunc
2064 ; Try an acquire atomicrmw add
2065 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2066 define i32 @test_atomicrmw_add(i256* %addr) {
2067 ; CHECK-LABEL: name: test_atomicrmw_add
2068 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2069 ; CHECK-NEXT: liveins: $x0
2070 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2071 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2072 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_ADD [[ADDR]](p0), [[VAL]] :: (load store acquire (s256) on %ir.addr)
2073 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2074 %oldval = atomicrmw add i256* %addr, i256 1 acquire
2075 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2076 ; test so work around it by truncating to i32 for now.
2077 %oldval.trunc = trunc i256 %oldval to i32
2078 ret i32 %oldval.trunc
2081 ; Try a release atomicrmw sub
2082 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2083 define i32 @test_atomicrmw_sub(i256* %addr) {
2084 ; CHECK-LABEL: name: test_atomicrmw_sub
2085 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2086 ; CHECK-NEXT: liveins: $x0
2087 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2088 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2089 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_SUB [[ADDR]](p0), [[VAL]] :: (load store release (s256) on %ir.addr)
2090 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2091 %oldval = atomicrmw sub i256* %addr, i256 1 release
2092 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2093 ; test so work around it by truncating to i32 for now.
2094 %oldval.trunc = trunc i256 %oldval to i32
2095 ret i32 %oldval.trunc
2098 ; Try an acq_rel atomicrmw and
2099 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2100 define i32 @test_atomicrmw_and(i256* %addr) {
2101 ; CHECK-LABEL: name: test_atomicrmw_and
2102 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2103 ; CHECK-NEXT: liveins: $x0
2104 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2105 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2106 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_AND [[ADDR]](p0), [[VAL]] :: (load store acq_rel (s256) on %ir.addr)
2107 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2108 %oldval = atomicrmw and i256* %addr, i256 1 acq_rel
2109 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2110 ; test so work around it by truncating to i32 for now.
2111 %oldval.trunc = trunc i256 %oldval to i32
2112 ret i32 %oldval.trunc
2115 ; Try a seq_cst atomicrmw nand
2116 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2117 define i32 @test_atomicrmw_nand(i256* %addr) {
2118 ; CHECK-LABEL: name: test_atomicrmw_nand
2119 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2120 ; CHECK-NEXT: liveins: $x0
2121 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2122 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2123 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_NAND [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2124 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2125 %oldval = atomicrmw nand i256* %addr, i256 1 seq_cst
2126 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2127 ; test so work around it by truncating to i32 for now.
2128 %oldval.trunc = trunc i256 %oldval to i32
2129 ret i32 %oldval.trunc
2132 ; Try a seq_cst atomicrmw or
2133 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2134 define i32 @test_atomicrmw_or(i256* %addr) {
2135 ; CHECK-LABEL: name: test_atomicrmw_or
2136 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2137 ; CHECK-NEXT: liveins: $x0
2138 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2139 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2140 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_OR [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2141 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2142 %oldval = atomicrmw or i256* %addr, i256 1 seq_cst
2143 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2144 ; test so work around it by truncating to i32 for now.
2145 %oldval.trunc = trunc i256 %oldval to i32
2146 ret i32 %oldval.trunc
2149 ; Try a seq_cst atomicrmw xor
2150 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2151 define i32 @test_atomicrmw_xor(i256* %addr) {
2152 ; CHECK-LABEL: name: test_atomicrmw_xor
2153 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2154 ; CHECK-NEXT: liveins: $x0
2155 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2156 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2157 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XOR [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2158 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2159 %oldval = atomicrmw xor i256* %addr, i256 1 seq_cst
2160 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2161 ; test so work around it by truncating to i32 for now.
2162 %oldval.trunc = trunc i256 %oldval to i32
2163 ret i32 %oldval.trunc
2166 ; Try a seq_cst atomicrmw min
2167 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2168 define i32 @test_atomicrmw_min(i256* %addr) {
2169 ; CHECK-LABEL: name: test_atomicrmw_min
2170 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2171 ; CHECK-NEXT: liveins: $x0
2172 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2173 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2174 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2175 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2176 %oldval = atomicrmw min i256* %addr, i256 1 seq_cst
2177 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2178 ; test so work around it by truncating to i32 for now.
2179 %oldval.trunc = trunc i256 %oldval to i32
2180 ret i32 %oldval.trunc
2183 ; Try a seq_cst atomicrmw max
2184 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2185 define i32 @test_atomicrmw_max(i256* %addr) {
2186 ; CHECK-LABEL: name: test_atomicrmw_max
2187 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2188 ; CHECK-NEXT: liveins: $x0
2189 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2190 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2191 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2192 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2193 %oldval = atomicrmw max i256* %addr, i256 1 seq_cst
2194 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2195 ; test so work around it by truncating to i32 for now.
2196 %oldval.trunc = trunc i256 %oldval to i32
2197 ret i32 %oldval.trunc
2200 ; Try a seq_cst atomicrmw unsigned min
2201 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2202 define i32 @test_atomicrmw_umin(i256* %addr) {
2203 ; CHECK-LABEL: name: test_atomicrmw_umin
2204 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2205 ; CHECK-NEXT: liveins: $x0
2206 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2207 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2208 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2209 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2210 %oldval = atomicrmw umin i256* %addr, i256 1 seq_cst
2211 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2212 ; test so work around it by truncating to i32 for now.
2213 %oldval.trunc = trunc i256 %oldval to i32
2214 ret i32 %oldval.trunc
2217 ; Try a seq_cst atomicrmw unsigned max
2218 ; AArch64 will expand some atomicrmw operations at the LLVM-IR level, so we use a wide type to avoid this.
2219 define i32 @test_atomicrmw_umax(i256* %addr) {
2220 ; CHECK-LABEL: name: test_atomicrmw_umax
2221 ; CHECK: bb.1 (%ir-block.{{[0-9]+}}):
2222 ; CHECK-NEXT: liveins: $x0
2223 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
2224 ; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
2225 ; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
2226 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
2227 %oldval = atomicrmw umax i256* %addr, i256 1 seq_cst
2228 ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
2229 ; test so work around it by truncating to i32 for now.
2230 %oldval.trunc = trunc i256 %oldval to i32
2231 ret i32 %oldval.trunc
2234 @addr = global i8* null
2236 define void @test_blockaddress() {
2237 ; CHECK-LABEL: name: test_blockaddress
2238 ; CHECK: [[BADDR:%[0-9]+]]:_(p0) = G_BLOCK_ADDR blockaddress(@test_blockaddress, %ir-block.block)
2239 ; CHECK: G_STORE [[BADDR]](p0)
2240 store i8* blockaddress(@test_blockaddress, %block), i8** @addr
2241 indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
2247 declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) readonly nounwind
2248 declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
2249 define void @test_invariant_intrin() {
2250 ; CHECK-LABEL: name: test_invariant_intrin
2251 ; CHECK: %{{[0-9]+}}:_(s64) = G_IMPLICIT_DEF
2252 ; CHECK-NEXT: RET_ReallyLR
2254 %y = bitcast %t* %x to i8*
2255 %inv = call {}* @llvm.invariant.start.p0i8(i64 8, i8* %y)
2256 call void @llvm.invariant.end.p0i8({}* %inv, i64 8, i8* %y)
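; The floating-point intrinsics below (ceil, cos, sin, sqrt, floor, nearbyint, rint)
; are expected to map one-to-one onto the corresponding generic opcodes (G_FCEIL,
; G_FCOS, and so on), for both scalar and vector types.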
2260 declare float @llvm.ceil.f32(float)
2261 define float @test_ceil_f32(float %x) {
2262 ; CHECK-LABEL: name: test_ceil_f32
2263 ; CHECK: %{{[0-9]+}}:_(s32) = G_FCEIL %{{[0-9]+}}
2264 %y = call float @llvm.ceil.f32(float %x)
2268 declare double @llvm.ceil.f64(double)
2269 define double @test_ceil_f64(double %x) {
2270 ; CHECK-LABEL: name: test_ceil_f64
2271 ; CHECK: %{{[0-9]+}}:_(s64) = G_FCEIL %{{[0-9]+}}
2272 %y = call double @llvm.ceil.f64(double %x)
2276 declare <2 x float> @llvm.ceil.v2f32(<2 x float>)
2277 define <2 x float> @test_ceil_v2f32(<2 x float> %x) {
2278 ; CHECK-LABEL: name: test_ceil_v2f32
2279 ; CHECK: %{{[0-9]+}}:_(<2 x s32>) = G_FCEIL %{{[0-9]+}}
2280 %y = call <2 x float> @llvm.ceil.v2f32(<2 x float> %x)
2284 declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
2285 define <4 x float> @test_ceil_v4f32(<4 x float> %x) {
2286 ; CHECK-LABEL: name: test_ceil_v4f32
2287 ; CHECK: %{{[0-9]+}}:_(<4 x s32>) = G_FCEIL %{{[0-9]+}}
2288 ; SELECT: %{{[0-9]+}}:fpr128 = FRINTPv4f32 %{{[0-9]+}}
2289 %y = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x)
2293 declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
2294 define <2 x double> @test_ceil_v2f64(<2 x double> %x) {
2295 ; CHECK-LABEL: name: test_ceil_v2f64
2296 ; CHECK: %{{[0-9]+}}:_(<2 x s64>) = G_FCEIL %{{[0-9]+}}
2297 %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x)
2301 declare float @llvm.cos.f32(float)
2302 define float @test_cos_f32(float %x) {
2303 ; CHECK-LABEL: name: test_cos_f32
2304 ; CHECK: %{{[0-9]+}}:_(s32) = G_FCOS %{{[0-9]+}}
2305 %y = call float @llvm.cos.f32(float %x)
2309 declare float @llvm.sin.f32(float)
2310 define float @test_sin_f32(float %x) {
2311 ; CHECK-LABEL: name: test_sin_f32
2312 ; CHECK: %{{[0-9]+}}:_(s32) = G_FSIN %{{[0-9]+}}
2313 %y = call float @llvm.sin.f32(float %x)
2317 declare float @llvm.sqrt.f32(float)
2318 define float @test_sqrt_f32(float %x) {
2319 ; CHECK-LABEL: name: test_sqrt_f32
2320 ; CHECK: %{{[0-9]+}}:_(s32) = G_FSQRT %{{[0-9]+}}
2321 %y = call float @llvm.sqrt.f32(float %x)
2325 declare float @llvm.floor.f32(float)
2326 define float @test_floor_f32(float %x) {
2327 ; CHECK-LABEL: name: test_floor_f32
2328 ; CHECK: %{{[0-9]+}}:_(s32) = G_FFLOOR %{{[0-9]+}}
2329 %y = call float @llvm.floor.f32(float %x)
2333 declare float @llvm.nearbyint.f32(float)
2334 define float @test_nearbyint_f32(float %x) {
2335 ; CHECK-LABEL: name: test_nearbyint_f32
2336 ; CHECK: %{{[0-9]+}}:_(s32) = G_FNEARBYINT %{{[0-9]+}}
2337 %y = call float @llvm.nearbyint.f32(float %x)
2341 ; CHECK-LABEL: name: test_llvm.aarch64.neon.ld3.v4i32.p0i32
2342 ; CHECK: %1:_(<4 x s32>), %2:_(<4 x s32>), %3:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld3), %0(p0) :: (load (s384) from %ir.ptr, align 64)
2343 define void @test_llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr) {
2344 %arst = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr)
2348 declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*) #3
2350 define void @test_i1_arg_zext(void (i1)* %f) {
2351 ; CHECK-LABEL: name: test_i1_arg_zext
2352 ; CHECK: [[I1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
2353 ; CHECK: [[ZEXT0:%[0-9]+]]:_(s8) = G_ZEXT [[I1]](s1)
2354 ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ZEXT0]](s8)
2355 ; CHECK: $w0 = COPY [[ZEXT1]](s32)
2356 call void %f(i1 true)
2360 declare i8* @llvm.stacksave()
2361 declare void @llvm.stackrestore(i8*)
2362 define void @test_stacksaverestore() {
2363 ; CHECK-LABEL: name: test_stacksaverestore
2364 ; CHECK: [[SAVE:%[0-9]+]]:_(p0) = COPY $sp
2365 ; CHECK-NEXT: $sp = COPY [[SAVE]](p0)
2366 ; CHECK-NEXT: RET_ReallyLR
2367 %sp = call i8* @llvm.stacksave()
2368 call void @llvm.stackrestore(i8* %sp)
2372 declare float @llvm.rint.f32(float)
2373 define float @test_rint_f32(float %x) {
2374 ; CHECK-LABEL: name: test_rint_f32
2375 ; CHECK: %{{[0-9]+}}:_(s32) = G_FRINT %{{[0-9]+}}
2376 %y = call float @llvm.rint.f32(float %x)
2380 declare void @llvm.assume(i1)
2381 define void @test_assume(i1 %x) {
2382 ; CHECK-LABEL: name: test_assume
2383 ; CHECK-NOT: llvm.assume
2384 ; CHECK: RET_ReallyLR
2385 call void @llvm.assume(i1 %x)
2389 declare void @llvm.experimental.noalias.scope.decl(metadata)
2390 define void @test.llvm.noalias.scope.decl(i8* %P, i8* %Q) nounwind ssp {
2391 tail call void @llvm.experimental.noalias.scope.decl(metadata !3)
2392 ; CHECK-LABEL: name: test.llvm.noalias.scope.decl
2393 ; CHECK-NOT: llvm.experimental.noalias.scope.decl
2394 ; CHECK: RET_ReallyLR
2399 !4 = distinct !{ !4, !5, !"test1: var" }
2400 !5 = distinct !{ !5, !"test1" }
2403 declare void @llvm.sideeffect()
2404 define void @test_sideeffect() {
2405 ; CHECK-LABEL: name: test_sideeffect
2406 ; CHECK-NOT: llvm.sideeffect
2407 ; CHECK: RET_ReallyLR
2408 call void @llvm.sideeffect()
2412 declare void @llvm.var.annotation(i8*, i8*, i8*, i32, i8*)
2413 define void @test_var_annotation(i8*, i8*, i8*, i32) {
2414 ; CHECK-LABEL: name: test_var_annotation
2415 ; CHECK-NOT: llvm.var.annotation
2416 ; CHECK: RET_ReallyLR
2417 call void @llvm.var.annotation(i8* %0, i8* %1, i8* %2, i32 %3, i8* null)
2421 declare i64 @llvm.readcyclecounter()
2422 define i64 @test_readcyclecounter() {
2423 ; CHECK-LABEL: name: test_readcyclecounter
2424 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_READCYCLECOUNTER{{$}}
2425 ; CHECK-NEXT: $x0 = COPY [[RES]]
2426 ; CHECK-NEXT: RET_ReallyLR implicit $x0
2427 %res = call i64 @llvm.readcyclecounter()
2431 define i64 @test_freeze(i64 %a) {
2432 ; CHECK-LABEL: name: test_freeze
2433 ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
2434 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_FREEZE [[COPY]]
2435 ; CHECK-NEXT: $x0 = COPY [[RES]]
2436 ; CHECK-NEXT: RET_ReallyLR implicit $x0
2437 %res = freeze i64 %a
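; Freezing an aggregate is split per member: the struct load below is translated as two
; scalar loads and each result gets its own G_FREEZE.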
2441 define {i8, i32} @test_freeze_struct({ i8, i32 }* %addr) {
2442 ; CHECK-LABEL: name: test_freeze_struct
2443 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
2444 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0)
2445 ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
2446 ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]]
2447 ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0)
2448 ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s8) = G_FREEZE [[LOAD]]
2449 ; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[LOAD1]]
2450 ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FREEZE]]
2451 ; CHECK-NEXT: $w0 = COPY [[ANYEXT]]
2452 ; CHECK-NEXT: $w1 = COPY [[FREEZE1]]
2453 ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
2454 %load = load { i8, i32 }, { i8, i32 }* %addr
2455 %res = freeze {i8, i32} %load
2459 !0 = !{ i64 0, i64 2 }
2461 declare i64 @llvm.lround.i64.f32(float) nounwind readnone
2462 define i64 @lround(float %x) {
2463 ; CHECK-LABEL: name: lround
2464 ; CHECK: bb.1 (%ir-block.0):
2465 ; CHECK: liveins: $s0
2466 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
2467 ; CHECK: [[LROUND:%[0-9]+]]:_(s64) = G_LROUND [[COPY]](s32)
2468 ; CHECK: $x0 = COPY [[LROUND]](s64)
2469 ; CHECK: RET_ReallyLR implicit $x0
2470 %lround = tail call i64 @llvm.lround.i64.f32(float %x)