// RUN: mlir-opt %s --pass-pipeline="builtin.module(llvm.func(sroa))" --split-input-file | FileCheck %s
// Accessing the third field of a struct through a constant GEP lets SROA
// shrink the alloca to a single i32 slot.
// CHECK-LABEL: llvm.func @basic_struct
llvm.func @basic_struct() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// Accessing a single array element through a constant GEP lets SROA shrink
// the alloca to a single i32 slot.
// CHECK-LABEL: llvm.func @basic_array
llvm.func @basic_array() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// A single GEP that digs through several levels of nested aggregates is
// resolved in one application, leaving only an i32 slot.
// CHECK-LABEL: llvm.func @multi_level_direct
llvm.func @multi_level_direct() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr inbounds %1[0, 2, 1, 5, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)>
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// The first application of SROA would generate a GEP with indices [0, 0]. This
// test ensures this GEP is not eliminated during the first application. Even
// though doing it would be correct, it would prevent the second application
// of SROA to eliminate the array. GEPs should be eliminated only when they are
// truly trivial (with indices [0]).

// CHECK-LABEL: llvm.func @multi_level_direct_two_applications
llvm.func @multi_level_direct_two_applications() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, array<10 x i32>, i8)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr inbounds %1[0, 2, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, array<10 x i32>, i8)>
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// A chain of two GEPs reaching a deeply nested element is also fully
// resolved, leaving only an i32 slot.
// CHECK-LABEL: llvm.func @multi_level_indirect
llvm.func @multi_level_indirect() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr inbounds %1[0, 2, 1, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)>
  %3 = llvm.getelementptr inbounds %2[0, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %4 = llvm.load %3 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %4 : i32
}

// -----
// This verifies that a nested GEP's users are checked properly. In this case
// the load goes over the bounds of the memory slot and thus should block the
// splitting of the alloca.

// CHECK-LABEL: llvm.func @nested_access_over_slot_bound
llvm.func @nested_access_over_slot_bound() -> i64 {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<(i32, struct<(
  %1 = llvm.alloca %0 x !llvm.struct<(i32, struct<(array<10 x i32>)>, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  // CHECK: %[[GEP0:.*]] = llvm.getelementptr inbounds %[[ALLOCA]]
  %2 = llvm.getelementptr inbounds %1[0, 1, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, struct<(array<10 x i32>)>, i32)>
  // CHECK: %[[GEP1:.*]] = llvm.getelementptr inbounds %[[GEP0]]
  %3 = llvm.getelementptr inbounds %2[0, 9] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
  // CHECK: %[[RES:.*]] = llvm.load %[[GEP1]]
  %4 = llvm.load %3 : !llvm.ptr -> i64
  // CHECK: llvm.return %[[RES]] : i64
  llvm.return %4 : i64
}

// -----
// Two distinct GEPs pointing at the same field must resolve to the same
// replacement slot, so the store is visible to the load.
// CHECK-LABEL: llvm.func @resolve_alias
// CHECK-SAME: (%[[ARG:.*]]: i32)
llvm.func @resolve_alias(%arg: i32) -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  %3 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
  llvm.store %arg, %2 : i32, !llvm.ptr
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %4 = llvm.load %3 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %4 : i32
}

// -----
// An alloca with a non-unit element count cannot be split.
// CHECK-LABEL: llvm.func @no_non_single_support
llvm.func @no_non_single_support() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant
  %0 = llvm.mlir.constant(2 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  // CHECK-NOT: = llvm.alloca
  %2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  %3 = llvm.load %2 : !llvm.ptr -> i32
  llvm.return %3 : i32
}

// -----
// A GEP whose first (pointer) index is non-zero steps past the allocated
// element, so the alloca cannot be split.
// CHECK-LABEL: llvm.func @no_pointer_indexing
llvm.func @no_pointer_indexing() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  // CHECK-NOT: = llvm.alloca
  %2 = llvm.getelementptr %1[1, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  %3 = llvm.load %2 : !llvm.ptr -> i32
  llvm.return %3 : i32
}

// -----
// The pointer escapes through the call, so the alloca cannot be split.
// CHECK-LABEL: llvm.func @no_direct_use
llvm.func @no_direct_use() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  // CHECK-NOT: = llvm.alloca
  %2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  %3 = llvm.load %2 : !llvm.ptr -> i32
  llvm.call @use(%1) : (!llvm.ptr) -> ()
  llvm.return %3 : i32
}

llvm.func @use(!llvm.ptr)

// -----
// CHECK-LABEL: llvm.func @direct_promotable_use_is_fine
llvm.func @direct_promotable_use_is_fine() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // This is a direct use of the slot but it can be removed because it implements PromotableOpInterface.
  llvm.intr.lifetime.start 2, %1 : !llvm.ptr
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// CHECK-LABEL: llvm.func @direct_promotable_use_is_fine_on_accessor
llvm.func @direct_promotable_use_is_fine_on_accessor() -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
  // This does not provide side-effect info but it can be removed because it implements PromotableOpInterface.
  %3 = llvm.intr.invariant.start 2, %2 : !llvm.ptr
  // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %4 = llvm.load %2 : !llvm.ptr -> i32
  // This does not provide side-effect info but it can be removed because it implements PromotableOpInterface.
  llvm.intr.invariant.end %3, 2, %2 : !llvm.ptr
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %4 : i32
}

// -----
// A GEP with a dynamic (SSA-value) index cannot be resolved to a single
// slot, so the alloca is kept intact.
// CHECK-LABEL: llvm.func @no_dynamic_indexing
// CHECK-SAME: (%[[ARG:.*]]: i32)
llvm.func @no_dynamic_indexing(%arg: i32) -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x !llvm.array<10 x i32> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  // CHECK-NOT: = llvm.alloca
  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, %[[ARG]]]
  %2 = llvm.getelementptr %1[0, %arg] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32>
  // CHECK: %[[RES:.*]] = llvm.load %[[GEP]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// A dynamic index at a nested level likewise blocks the split.
// CHECK-LABEL: llvm.func @no_nested_dynamic_indexing
// CHECK-SAME: (%[[ARG:.*]]: i32)
llvm.func @no_nested_dynamic_indexing(%arg: i32) -> i32 {
  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32)
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x !llvm.struct<(array<10 x i32>, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  %1 = llvm.alloca %0 x !llvm.struct<(array<10 x i32>, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
  // CHECK-NOT: = llvm.alloca
  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0, %[[ARG]]]
  %2 = llvm.getelementptr %1[0, 0, %arg] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<(array<10 x i32>, i32)>
  // CHECK: %[[RES:.*]] = llvm.load %[[GEP]]
  %3 = llvm.load %2 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %3 : i32
}

// -----
// A store directly to the alloca pointer accesses the first field.
// CHECK-LABEL: llvm.func @store_first_field
llvm.func @store_first_field(%arg: i32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: llvm.store %{{.*}}, %[[ALLOCA]] : i32
  llvm.store %arg, %1 : i32, !llvm.ptr
  llvm.return
}

// -----
// A store of a same-sized but differently-typed value still targets the
// first field's slot.
// CHECK-LABEL: llvm.func @store_first_field_different_type
// CHECK-SAME: (%[[ARG:.*]]: f32)
llvm.func @store_first_field_different_type(%arg: f32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]] : f32
  llvm.store %arg, %1 : f32, !llvm.ptr
  llvm.return
}

// -----
// A store smaller than the first field still fits inside its slot.
// CHECK-LABEL: llvm.func @store_sub_field
// CHECK-SAME: (%[[ARG:.*]]: f32)
llvm.func @store_sub_field(%arg: f32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i64
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i64, i32)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]] : f32
  llvm.store %arg, %1 : f32, !llvm.ptr
  llvm.return
}

// -----
// A load directly from the alloca pointer accesses the first field.
// CHECK-LABEL: llvm.func @load_first_field
llvm.func @load_first_field() -> i32 {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: %[[RES:.*]] = llvm.load %[[ALLOCA]] : !llvm.ptr -> i32
  %2 = llvm.load %1 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %2 : i32
}

// -----
// A load of a same-sized but differently-typed value still reads the first
// field's slot.
// CHECK-LABEL: llvm.func @load_first_field_different_type
llvm.func @load_first_field_different_type() -> f32 {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: %[[RES:.*]] = llvm.load %[[ALLOCA]] : !llvm.ptr -> f32
  %2 = llvm.load %1 : !llvm.ptr -> f32
  // CHECK: llvm.return %[[RES]] : f32
  llvm.return %2 : f32
}

// -----
// A load smaller than the first field still fits inside its slot.
// CHECK-LABEL: llvm.func @load_sub_field
llvm.func @load_sub_field() -> i32 {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i64 : (i32) -> !llvm.ptr
  %1 = llvm.alloca %0 x !llvm.struct<(i64, i32)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: %[[RES:.*]] = llvm.load %[[ALLOCA]]
  %res = llvm.load %1 : !llvm.ptr -> i32
  // CHECK: llvm.return %[[RES]] : i32
  llvm.return %res : i32
}

// -----
// A vector store of a same-sized but differently-typed vector still targets
// the first field's slot.
// CHECK-LABEL: llvm.func @vector_store_type_mismatch
// CHECK-SAME: %[[ARG:.*]]: vector<4xi32>
llvm.func @vector_store_type_mismatch(%arg: vector<4xi32>) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x vector<4xf32>
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xf32>)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]]
  llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
  llvm.return
}

// -----
// Storing the alloca pointer itself into memory makes it escape, so the
// alloca is kept intact.
// CHECK-LABEL: llvm.func @store_to_memory
// CHECK-SAME: %[[ARG:.*]]: !llvm.ptr
llvm.func @store_to_memory(%arg: !llvm.ptr) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xf32>)> : (i32) -> !llvm.ptr
  // CHECK-NEXT: llvm.store %[[ALLOCA]], %[[ARG]]
  llvm.store %1, %arg : !llvm.ptr, !llvm.ptr
  llvm.return
}

// -----
// A byte-offset GEP whose offset lands exactly on a field boundary is
// resolved even though its element type differs from the struct's.
// CHECK-LABEL: llvm.func @type_mismatch_array_access
// CHECK-SAME: %[[ARG:.*]]: i32
llvm.func @type_mismatch_array_access(%arg: i32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<(i32, i32, i32)> : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr %1[8] : (!llvm.ptr) -> !llvm.ptr, i8
  // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]]
  llvm.store %arg, %2 : i32, !llvm.ptr
  llvm.return
}

// -----
// A GEP using a different (smaller) struct type still resolves when its
// computed offset matches a field boundary of the allocated struct.
// CHECK-LABEL: llvm.func @type_mismatch_struct_access
// CHECK-SAME: %[[ARG:.*]]: i32
llvm.func @type_mismatch_struct_access(%arg: i32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<(i32, i32, i32)> : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, i32)>
  // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]]
  llvm.store %arg, %2 : i32, !llvm.ptr
  llvm.return
}

// -----
// A byte offset landing in the struct's trailing padding maps to no field,
// so the alloca is kept intact.
// CHECK-LABEL: llvm.func @index_in_final_padding
llvm.func @index_in_final_padding(%arg: i32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i8)>
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i8)> : (i32) -> !llvm.ptr
  // CHECK: = llvm.getelementptr %[[ALLOCA]][7] : (!llvm.ptr) -> !llvm.ptr, i8
  %2 = llvm.getelementptr %1[7] : (!llvm.ptr) -> !llvm.ptr, i8
  llvm.store %arg, %2 : i32, !llvm.ptr
  llvm.return
}

// -----
// A byte offset past the end of the struct blocks the split.
// CHECK-LABEL: llvm.func @index_out_of_bounds
llvm.func @index_out_of_bounds(%arg: i32) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32)>
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> : (i32) -> !llvm.ptr
  // CHECK: = llvm.getelementptr %[[ALLOCA]][9] : (!llvm.ptr) -> !llvm.ptr, i8
  %2 = llvm.getelementptr %1[9] : (!llvm.ptr) -> !llvm.ptr, i8
  llvm.store %arg, %2 : i32, !llvm.ptr
  llvm.return
}

// -----
// A byte offset landing in interior padding (between i16 and i32) maps to
// no field, so the alloca is kept intact.
// CHECK-LABEL: llvm.func @index_in_padding
llvm.func @index_in_padding(%arg: i16) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, i32)>
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, i32)> : (i32) -> !llvm.ptr
  // CHECK: = llvm.getelementptr %[[ALLOCA]][2] : (!llvm.ptr) -> !llvm.ptr, i8
  %2 = llvm.getelementptr %1[2] : (!llvm.ptr) -> !llvm.ptr, i8
  llvm.store %arg, %2 : i16, !llvm.ptr
  llvm.return
}

// -----
// In a packed struct the same byte offset is a real field boundary, so the
// access resolves and the alloca is split.
// CHECK-LABEL: llvm.func @index_not_in_padding_because_packed
// CHECK-SAME: %[[ARG:.*]]: i16
llvm.func @index_not_in_padding_because_packed(%arg: i16) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
  %1 = llvm.alloca %0 x !llvm.struct<"foo", packed (i16, i32)> : (i32) -> !llvm.ptr
  %2 = llvm.getelementptr %1[2] : (!llvm.ptr) -> !llvm.ptr, i8
  // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]]
  llvm.store %arg, %2 : i16, !llvm.ptr
  llvm.return
}

// -----
// A negative GEP index must be rejected gracefully, not crash the pass.
// CHECK-LABEL: llvm.func @no_crash_on_negative_gep_index
// CHECK-SAME: %[[ARG:.*]]: f16
llvm.func @no_crash_on_negative_gep_index(%arg: f16) {
  %0 = llvm.mlir.constant(1 : i32) : i32
  // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
  %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
  // CHECK: llvm.getelementptr %[[ALLOCA]][-1] : (!llvm.ptr) -> !llvm.ptr, f32
  %2 = llvm.getelementptr %1[-1] : (!llvm.ptr) -> !llvm.ptr, f32
  llvm.store %arg, %2 : f16, !llvm.ptr
  llvm.return
}

// -----
440 // CHECK-LABEL: llvm.func @out_of_bound_gep_array_access
441 // CHECK-SAME: %[[ARG:.*]]: i32
442 llvm.func @out_of_bound_gep_array_access(%arg: i32) {
443 %0 = llvm.mlir.constant(1 : i32) : i32
444 // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
445 %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> : (i32) -> !llvm.ptr
446 %2 = llvm.getelementptr %1[0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<4 x i8>
447 // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]]
448 llvm.store %arg, %2 : i32, !llvm.ptr