// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64 -target-feature +ls64 -O1 -emit-llvm -x c %s -o - | FileCheck %s

struct foo { unsigned long long x[8]; };
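
// struct foo is 8 * 8 = 64 bytes, the granule that a single ld64b/st64b
// instruction transfers through eight consecutive general-purpose registers;
// the checks below therefore expect each asm operand to be lowered as a
// single i512 value.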
// CHECK-LABEL: @load(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(ptr [[ADDR:%.*]]) #[[ATTR1:[0-9]+]], !srcloc [[META2:![0-9]+]]
// CHECK-NEXT:    store i512 [[TMP0]], ptr [[OUTPUT:%.*]], align 8
// CHECK-NEXT:    ret void
//
void load(struct foo *output, void *addr)
{
    __asm__ volatile ("ld64b %0,[%1]" : "=r" (*output) : "r" (addr) : "memory");
}
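
// Not part of the checked output: with a toolchain that provides the ACLE
// LS64 intrinsics (a sketch, assuming <arm_acle.h> defines data512_t and
// __arm_ld64b when __ARM_FEATURE_LS64 is set), the same load can be written
// without inline asm:
//
//   #include <arm_acle.h>
//   void load_acle(data512_t *output, const void *addr)  /* hypothetical name */
//   {
//       *output = __arm_ld64b(addr);
//   }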

// CHECK-LABEL: @store(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i512, ptr [[INPUT:%.*]], align 8
// CHECK-NEXT:    tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[TMP0]], ptr [[ADDR:%.*]]) #[[ATTR1]], !srcloc [[META3:![0-9]+]]
// CHECK-NEXT:    ret void
//
void store(const struct foo *input, void *addr)
{
    __asm__ volatile ("st64b %0,[%1]" : : "r" (*input), "r" (addr) : "memory" );
}
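
// The intrinsic counterpart (again a sketch, assuming the ACLE names above
// are available) would be a single call:
//
//   __arm_st64b(addr, *(const data512_t *)input);  /* cast shown for illustration */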

// CHECK-LABEL: @store2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[IN:%.*]], align 4, !tbaa [[TBAA4:![0-9]+]]
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP1]] to i64
// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 16
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP2]] to i64
// CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 64
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP3]] to i64
// CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 100
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV11:%.*]] = sext i32 [[TMP4]] to i64
// CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 144
// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX13]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV14:%.*]] = sext i32 [[TMP5]] to i64
// CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 196
// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV17:%.*]] = sext i32 [[TMP6]] to i64
// CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds nuw i8, ptr [[IN]], i64 256
// CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX19]], align 4, !tbaa [[TBAA4]]
// CHECK-NEXT:    [[CONV20:%.*]] = sext i32 [[TMP7]] to i64
// CHECK-NEXT:    [[S_SROA_10_0_INSERT_EXT:%.*]] = zext i64 [[CONV20]] to i512
// CHECK-NEXT:    [[S_SROA_10_0_INSERT_SHIFT:%.*]] = shl nuw i512 [[S_SROA_10_0_INSERT_EXT]], 448
// CHECK-NEXT:    [[S_SROA_9_0_INSERT_EXT:%.*]] = zext i64 [[CONV17]] to i512
// CHECK-NEXT:    [[S_SROA_9_0_INSERT_SHIFT:%.*]] = shl nuw nsw i512 [[S_SROA_9_0_INSERT_EXT]], 384
// CHECK-NEXT:    [[S_SROA_9_0_INSERT_INSERT:%.*]] = or disjoint i512 [[S_SROA_10_0_INSERT_SHIFT]], [[S_SROA_9_0_INSERT_SHIFT]]
// CHECK-NEXT:    [[S_SROA_8_0_INSERT_EXT:%.*]] = zext i64 [[CONV14]] to i512
// CHECK-NEXT:    [[S_SROA_8_0_INSERT_SHIFT:%.*]] = shl nuw nsw i512 [[S_SROA_8_0_INSERT_EXT]], 320
// CHECK-NEXT:    [[S_SROA_8_0_INSERT_INSERT:%.*]] = or disjoint i512 [[S_SROA_9_0_INSERT_INSERT]], [[S_SROA_8_0_INSERT_SHIFT]]
// CHECK-NEXT:    [[S_SROA_7_0_INSERT_EXT:%.*]] = zext i64 [[CONV11]] to i512
// CHECK-NEXT:    [[S_SROA_7_0_INSERT_SHIFT:%.*]] = shl nuw nsw i512 [[S_SROA_7_0_INSERT_EXT]], 256
// CHECK-NEXT:    [[S_SROA_7_0_INSERT_INSERT:%.*]] = or disjoint i512 [[S_SROA_8_0_INSERT_INSERT]], [[S_SROA_7_0_INSERT_SHIFT]]
// CHECK-NEXT:    [[S_SROA_6_0_INSERT_EXT:%.*]] = zext i64 [[CONV8]] to i512
// CHECK-NEXT:    [[S_SROA_6_0_INSERT_SHIFT:%.*]] = shl nuw nsw i512 [[S_SROA_6_0_INSERT_EXT]], 192
// CHECK-NEXT:    [[S_SROA_6_0_INSERT_INSERT:%.*]] = or disjoint i512 [[S_SROA_7_0_INSERT_INSERT]], [[S_SROA_6_0_INSERT_SHIFT]]
// CHECK-NEXT:    [[S_SROA_5_0_INSERT_EXT:%.*]] = zext i64 [[CONV5]] to i512
// CHECK-NEXT:    [[S_SROA_5_0_INSERT_SHIFT:%.*]] = shl nuw nsw i512 [[S_SROA_5_0_INSERT_EXT]], 128
// CHECK-NEXT:    [[S_SROA_4_0_INSERT_EXT:%.*]] = zext i64 [[CONV2]] to i512
// CHECK-NEXT:    [[S_SROA_4_0_INSERT_SHIFT:%.*]] = shl nuw nsw i512 [[S_SROA_4_0_INSERT_EXT]], 64
// CHECK-NEXT:    [[S_SROA_4_0_INSERT_MASK:%.*]] = or disjoint i512 [[S_SROA_6_0_INSERT_INSERT]], [[S_SROA_5_0_INSERT_SHIFT]]
// CHECK-NEXT:    [[S_SROA_0_0_INSERT_EXT:%.*]] = zext i64 [[CONV]] to i512
// CHECK-NEXT:    [[S_SROA_0_0_INSERT_MASK:%.*]] = or disjoint i512 [[S_SROA_4_0_INSERT_MASK]], [[S_SROA_4_0_INSERT_SHIFT]]
// CHECK-NEXT:    [[S_SROA_0_0_INSERT_INSERT:%.*]] = or i512 [[S_SROA_0_0_INSERT_MASK]], [[S_SROA_0_0_INSERT_EXT]]
// CHECK-NEXT:    tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[S_SROA_0_0_INSERT_INSERT]], ptr [[ADDR:%.*]]) #[[ATTR1]], !srcloc [[META8:![0-9]+]]
// CHECK-NEXT:    ret void
//
void store2(int *in, void *addr)
{
    struct foo s = { in[0], in[1], in[4], in[16], in[25], in[36], in[49], in[64] };
    __asm__ volatile ("st64b %0,[%1]" : : "r" (s), "r" (addr) : "memory" );
}
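
// The store2 checks above document how SROA assembles the 512-bit asm operand
// entirely in registers instead of spilling s: each sign-extended lane k is
// OR-ed in at bit position 64 * k, i.e. conceptually
//
//   acc = 0;
//   for (k = 0; k < 8; ++k)
//       acc |= (uint512)(uint64_t)lane[k] << (64 * k);   /* uint512 is notional */
//
// and the loads at byte offsets 0, 4, 16, 64, 100, 144, 196 and 256 correspond
// to in[0], in[1], in[4], in[16], in[25], in[36], in[49] and in[64].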