// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s

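// The alloc_align(N) attribute promises that the pointer returned by the
// function is aligned to at least the value of its Nth argument. CodeGen
// models this with an llvm.assume call carrying an "align" operand bundle;
// when the alignment argument is not already an i64, it is zero-extended or
// truncated first (the CASTED_ALIGN lines in the checks below).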
__INT32_TYPE__ *m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));

// Condition where the parameter to m1 is not size_t.
// CHECK-LABEL: @test1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m1(i32 noundef [[TMP0]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test1(__INT32_TYPE__ a) {
  return *m1(a);
}
// Condition where the test2 parameter needs casting.
// CHECK-LABEL: @test2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m1(i32 noundef [[CONV]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test2(__SIZE_TYPE__ a) {
  return *m1(a);
}
__INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));
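
// m2 already takes a __SIZE_TYPE__, so the assume bundle below can reuse the
// i64 value directly; no separate CASTED_ALIGN cast is emitted.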
// The test3 parameter needs casting, but 'm2' is already correct.
// CHECK-LABEL: @test3(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m2(i64 noundef [[CONV]])
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CONV]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test3(__INT32_TYPE__ a) {
  return *m2(a);
}

// Every type matches; the canonical example.
// CHECK-LABEL: @test4(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m2(i64 noundef [[TMP0]])
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[TMP0]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test4(__SIZE_TYPE__ a) {
  return *m2(a);
}

struct Empty {};
struct MultiArgs { __INT64_TYPE__ a, b; };
// The empty struct parameter doesn't take up an IR parameter; 'i' takes up 2
// (the x86-64 ABI passes an i128 as two i64s).
// Truncating the alignment to i64 is permissible, since alignments greater
// than 2^64 are insane.
__INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
// CHECK-LABEL: @test5(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT:    store i64 [[A_COERCE0:%.*]], ptr [[TMP1]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT:    store i64 [[A_COERCE1:%.*]], ptr [[TMP2]], align 8
// CHECK-NEXT:    [[A1:%.*]] = load i128, ptr [[A]], align 16
// CHECK-NEXT:    store i128 [[A1]], ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load i128, ptr [[A_ADDR]], align 16
// CHECK-NEXT:    store i128 [[TMP3]], ptr [[COERCE]], align 16
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[COERCE]], i32 0, i32 0
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 16
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[COERCE]], i32 0, i32 1
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, ptr [[TMP7]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m3(i64 noundef [[TMP6]], i64 noundef [[TMP8]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP9]]
//
__INT32_TYPE__ test5(__int128_t a) {
  struct Empty e;
  return *m3(e, a);
}
// The struct parameter takes up 2 IR parameters, and 'i' takes up 2.
__INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
// CHECK-LABEL: @test6(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_MULTIARGS:%.*]], align 8
// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT:    store i64 [[A_COERCE0:%.*]], ptr [[TMP1]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT:    store i64 [[A_COERCE1:%.*]], ptr [[TMP2]], align 8
// CHECK-NEXT:    [[A1:%.*]] = load i128, ptr [[A]], align 16
// CHECK-NEXT:    store i128 [[A1]], ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load i128, ptr [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[E]], i32 0, i32 0
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[E]], i32 0, i32 1
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, ptr [[TMP7]], align 8
// CHECK-NEXT:    store i128 [[TMP3]], ptr [[COERCE]], align 16
// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[COERCE]], i32 0, i32 0
// CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 16
// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[COERCE]], i32 0, i32 1
// CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call ptr @m4(i64 [[TMP6]], i64 [[TMP8]], i64 noundef [[TMP11]], i64 noundef [[TMP13]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP14]]
//
__INT32_TYPE__ test6(__int128_t a) {
  struct MultiArgs e;
  return *m4(e, a);
}
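
// For reference (illustrative only, not exercised by this test): a typical
// real-world shape for an alloc_align function is an aligned allocator whose
// alignment travels in an argument, e.g.
//   void *my_alloc(__SIZE_TYPE__ alignment, __SIZE_TYPE__ size)
//       __attribute__((alloc_align(1)));
// Calls through such a declaration get the same llvm.assume "align" operand
// bundle checked above.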