Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / builtin-counted-by-ref.c
blob8ad715879aa767f7ab8c5304419101dae39d8a93
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s --check-prefix=X86_64
// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s --check-prefix=I386
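// __builtin_counted_by_ref(p->fam) yields a pointer to the flexible array
// member's counted_by field, or a 'void *' result when the member has no
// counted_by attribute (see test3). The tests below check the IR emitted
// for both cases on a 64-bit and a 32-bit target.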

struct a {
  char x;
  short count;
  int array[] __attribute__((counted_by(count)));
};
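
// test1: basic case. The builtin resolves to a GEP of the 'count' field
// (field index 1 of struct a), and the stored size is truncated to the
// field's type (i16).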
// X86_64-LABEL: define dso_local ptr @test1(
// X86_64-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] {
// X86_64-NEXT: [[ENTRY:.*:]]
// X86_64-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// X86_64-NEXT: [[P:%.*]] = alloca ptr, align 8
// X86_64-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// X86_64-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]]
// X86_64-NEXT: [[ADD:%.*]] = add i64 4, [[MUL]]
// X86_64-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 noundef [[ADD]]) #[[ATTR2:[0-9]+]]
// X86_64-NEXT: store ptr [[CALL]], ptr [[P]], align 8
// X86_64-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV1:%.*]] = trunc i32 [[TMP1]] to i16
// X86_64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[TMP2]], i32 0, i32 1
// X86_64-NEXT: store i16 [[CONV1]], ptr [[DOT_COUNTED_BY_GEP]], align 2
// X86_64-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: ret ptr [[TMP3]]
//
// I386-LABEL: define dso_local ptr @test1(
// I386-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] {
// I386-NEXT: [[ENTRY:.*:]]
// I386-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// I386-NEXT: [[P:%.*]] = alloca ptr, align 4
// I386-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[MUL:%.*]] = mul i32 4, [[TMP0]]
// I386-NEXT: [[ADD:%.*]] = add i32 4, [[MUL]]
// I386-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 noundef [[ADD]]) #[[ATTR2:[0-9]+]]
// I386-NEXT: store ptr [[CALL]], ptr [[P]], align 4
// I386-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i16
// I386-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[TMP2]], i32 0, i32 1
// I386-NEXT: store i16 [[CONV]], ptr [[DOT_COUNTED_BY_GEP]], align 2
// I386-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: ret ptr [[TMP3]]

struct a *test1(int size) {
  struct a *p = __builtin_malloc(sizeof(struct a) + sizeof(int) * size);

  *__builtin_counted_by_ref(p->array) = size;
  return p;
}
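
// test2: 'count' and the flexible array member live in different, deeply
// nested anonymous structs. The builtin must still emit the full GEP path
// to the count field (an i8 here, so the store truncates to i8).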
struct b {
  int _filler;
  struct {
    int __filler;
    struct {
      int ___filler;
      struct {
        char count;
      };
    };
  };
  struct {
    int filler_;
    struct {
      int filler__;
      struct {
        long array[] __attribute__((counted_by(count)));
      };
    };
  };
};

// X86_64-LABEL: define dso_local ptr @test2(
// X86_64-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// X86_64-NEXT: [[ENTRY:.*:]]
// X86_64-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// X86_64-NEXT: [[P:%.*]] = alloca ptr, align 8
// X86_64-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// X86_64-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]]
// X86_64-NEXT: [[ADD:%.*]] = add i64 4, [[MUL]]
// X86_64-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 noundef [[ADD]]) #[[ATTR2]]
// X86_64-NEXT: store ptr [[CALL]], ptr [[P]], align 8
// X86_64-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV1:%.*]] = trunc i32 [[TMP1]] to i8
// X86_64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[TMP2]], i32 0, i32 1, i32 1, i32 1, i32 0
// X86_64-NEXT: store i8 [[CONV1]], ptr [[DOT_COUNTED_BY_GEP]], align 1
// X86_64-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: ret ptr [[TMP3]]
//
// I386-LABEL: define dso_local ptr @test2(
// I386-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// I386-NEXT: [[ENTRY:.*:]]
// I386-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// I386-NEXT: [[P:%.*]] = alloca ptr, align 4
// I386-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[MUL:%.*]] = mul i32 4, [[TMP0]]
// I386-NEXT: [[ADD:%.*]] = add i32 4, [[MUL]]
// I386-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 noundef [[ADD]]) #[[ATTR2]]
// I386-NEXT: store ptr [[CALL]], ptr [[P]], align 4
// I386-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// I386-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds [[STRUCT_B:%.*]], ptr [[TMP2]], i32 0, i32 1, i32 1, i32 1, i32 0
// I386-NEXT: store i8 [[CONV]], ptr [[DOT_COUNTED_BY_GEP]], align 1
// I386-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: ret ptr [[TMP3]]

struct b *test2(int size) {
  struct b *p = __builtin_malloc(sizeof(struct a) + sizeof(int) * size);

  *__builtin_counted_by_ref(p->array) = size;
  return p;
}
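
// test3: the flexible array member has no counted_by attribute, so the
// builtin's result has type 'void *'. The _Generic below selects
// '&__ignored', and the size is stored into a local that is never read.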
struct c {
  char x;
  short count;
  int array[];
};

// X86_64-LABEL: define dso_local ptr @test3(
// X86_64-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// X86_64-NEXT: [[ENTRY:.*:]]
// X86_64-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// X86_64-NEXT: [[P:%.*]] = alloca ptr, align 8
// X86_64-NEXT: [[__IGNORED:%.*]] = alloca i64, align 8
// X86_64-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// X86_64-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]]
// X86_64-NEXT: [[ADD:%.*]] = add i64 4, [[MUL]]
// X86_64-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 noundef [[ADD]]) #[[ATTR2]]
// X86_64-NEXT: store ptr [[CALL]], ptr [[P]], align 8
// X86_64-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// X86_64-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// X86_64-NEXT: store i64 [[CONV1]], ptr [[__IGNORED]], align 8
// X86_64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 8
// X86_64-NEXT: ret ptr [[TMP2]]
//
// I386-LABEL: define dso_local ptr @test3(
// I386-SAME: i32 noundef [[SIZE:%.*]]) #[[ATTR0]] {
// I386-NEXT: [[ENTRY:.*:]]
// I386-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// I386-NEXT: [[P:%.*]] = alloca ptr, align 4
// I386-NEXT: [[__IGNORED:%.*]] = alloca i32, align 4
// I386-NEXT: store i32 [[SIZE]], ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[TMP0:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: [[MUL:%.*]] = mul i32 4, [[TMP0]]
// I386-NEXT: [[ADD:%.*]] = add i32 4, [[MUL]]
// I386-NEXT: [[CALL:%.*]] = call ptr @malloc(i32 noundef [[ADD]]) #[[ATTR2]]
// I386-NEXT: store ptr [[CALL]], ptr [[P]], align 4
// I386-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIZE_ADDR]], align 4
// I386-NEXT: store i32 [[TMP1]], ptr [[__IGNORED]], align 4
// I386-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P]], align 4
// I386-NEXT: ret ptr [[TMP2]]

struct c *test3(int size) {
  struct c *p = __builtin_malloc(sizeof(struct c) + sizeof(int) * size);
  unsigned long int __ignored;

  *_Generic(
      __builtin_counted_by_ref(p->array),
      void *: &__ignored,
      default: __builtin_counted_by_ref(p->array)) = size;

  return p;
}