1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=asan -asan-instrumentation-with-call-threshold=100 -S | FileCheck %s
3 ; RUN: opt < %s -passes=asan -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s -check-prefix=CALLS
5 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; <1 x i32> is a 4-byte access: inline shadow check with a slow-path test of
; the partial shadow granule; in CALLS mode it lowers to @__asan_load4.
7 define void @load.v1i32(ptr %p) sanitize_address {
8 ; CHECK-LABEL: @load.v1i32(
9 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
10 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
11 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
12 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
13 ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
14 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
15 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0:![0-9]+]]
17 ; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP1]], 7
18 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 3
19 ; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8
20 ; CHECK-NEXT: [[TMP11:%.*]] = icmp sge i8 [[TMP10]], [[TMP5]]
21 ; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP13]]
23 ; CHECK-NEXT: call void @__asan_report_load4(i64 [[TMP1]]) #[[ATTR4:[0-9]+]]
24 ; CHECK-NEXT: unreachable
26 ; CHECK-NEXT: [[TMP14:%.*]] = load <1 x i32>, ptr [[P]], align 4
27 ; CHECK-NEXT: ret void
29 ; CALLS-LABEL: @load.v1i32(
30 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
31 ; CALLS-NEXT: call void @__asan_load4(i64 [[TMP1]])
32 ; CALLS-NEXT: [[TMP2:%.*]] = load <1 x i32>, ptr [[P]], align 4
33 ; CALLS-NEXT: ret void
35 load <1 x i32>, ptr %p
; <2 x i32> is an 8-byte (full shadow granule) access: a nonzero shadow byte is
; always a fault, so there is no slow path; CALLS mode uses @__asan_load8.
39 define void @load.v2i32(ptr %p) sanitize_address {
40 ; CHECK-LABEL: @load.v2i32(
41 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
42 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
43 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
44 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
45 ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
46 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
47 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
49 ; CHECK-NEXT: call void @__asan_report_load8(i64 [[TMP1]]) #[[ATTR4]]
50 ; CHECK-NEXT: unreachable
52 ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr [[P]], align 8
53 ; CHECK-NEXT: ret void
55 ; CALLS-LABEL: @load.v2i32(
56 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
57 ; CALLS-NEXT: call void @__asan_load8(i64 [[TMP1]])
58 ; CALLS-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[P]], align 8
59 ; CALLS-NEXT: ret void
61 load <2 x i32>, ptr %p
; <4 x i32> is a 16-byte access spanning two shadow granules: the shadow is
; tested as a single i16 load; CALLS mode uses @__asan_load16.
65 define void @load.v4i32(ptr %p) sanitize_address {
66 ; CHECK-LABEL: @load.v4i32(
67 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
68 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
69 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
70 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
71 ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP4]], align 2
72 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i16 [[TMP5]], 0
73 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
75 ; CHECK-NEXT: call void @__asan_report_load16(i64 [[TMP1]]) #[[ATTR4]]
76 ; CHECK-NEXT: unreachable
78 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr [[P]], align 16
79 ; CHECK-NEXT: ret void
81 ; CALLS-LABEL: @load.v4i32(
82 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
83 ; CALLS-NEXT: call void @__asan_load16(i64 [[TMP1]])
84 ; CALLS-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[P]], align 16
85 ; CALLS-NEXT: ret void
87 load <4 x i32>, ptr %p
; <8 x i32> is 32 bytes, larger than the widest direct check: ASan checks only
; the first and last byte of the range (two full inline checks, reporting via
; @__asan_report_load_n with the size); CALLS mode uses @__asan_loadN.
91 define void @load.v8i32(ptr %p) sanitize_address {
92 ; CHECK-LABEL: @load.v8i32(
93 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
94 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 31
95 ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
96 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
97 ; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
98 ; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
99 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
100 ; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
101 ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
102 ; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
104 ; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
105 ; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
106 ; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
107 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
109 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP4]], i64 32) #[[ATTR4]]
110 ; CHECK-NEXT: unreachable
112 ; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
113 ; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
114 ; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
115 ; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
116 ; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
117 ; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
118 ; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
120 ; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
121 ; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
122 ; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
123 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
125 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP16]], i64 32) #[[ATTR4]]
126 ; CHECK-NEXT: unreachable
128 ; CHECK-NEXT: [[TMP28:%.*]] = load <8 x i32>, ptr [[P]], align 32
129 ; CHECK-NEXT: ret void
131 ; CALLS-LABEL: @load.v8i32(
132 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
133 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP1]], i64 32)
134 ; CALLS-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[P]], align 32
135 ; CALLS-NEXT: ret void
137 load <8 x i32>, ptr %p
; <16 x i32> is 64 bytes: same first/last-byte strategy as the 32-byte case,
; with size 64 passed to @__asan_report_load_n / @__asan_loadN.
141 define void @load.v16i32(ptr %p) sanitize_address {
142 ; CHECK-LABEL: @load.v16i32(
143 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
144 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 63
145 ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
146 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
147 ; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
148 ; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
149 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
150 ; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
151 ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
152 ; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
154 ; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
155 ; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
156 ; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
157 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
159 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP4]], i64 64) #[[ATTR4]]
160 ; CHECK-NEXT: unreachable
162 ; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
163 ; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
164 ; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
165 ; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
166 ; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
167 ; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
168 ; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
170 ; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
171 ; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
172 ; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
173 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
175 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP16]], i64 64) #[[ATTR4]]
176 ; CHECK-NEXT: unreachable
178 ; CHECK-NEXT: [[TMP28:%.*]] = load <16 x i32>, ptr [[P]], align 64
179 ; CHECK-NEXT: ret void
181 ; CALLS-LABEL: @load.v16i32(
182 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
183 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP1]], i64 64)
184 ; CALLS-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr [[P]], align 64
185 ; CALLS-NEXT: ret void
187 load <16 x i32>, ptr %p
; Store counterpart of load.v1i32: 4-byte access, inline check with slow path,
; reporting via @__asan_report_store4; CALLS mode uses @__asan_store4.
192 define void @store.v1i32(ptr %p) sanitize_address {
193 ; CHECK-LABEL: @store.v1i32(
194 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
195 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
196 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
197 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
198 ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
199 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
200 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
202 ; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP1]], 7
203 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 3
204 ; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8
205 ; CHECK-NEXT: [[TMP11:%.*]] = icmp sge i8 [[TMP10]], [[TMP5]]
206 ; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP13]]
208 ; CHECK-NEXT: call void @__asan_report_store4(i64 [[TMP1]]) #[[ATTR4]]
209 ; CHECK-NEXT: unreachable
211 ; CHECK-NEXT: store <1 x i32> zeroinitializer, ptr [[P]], align 4
212 ; CHECK-NEXT: ret void
214 ; CALLS-LABEL: @store.v1i32(
215 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
216 ; CALLS-NEXT: call void @__asan_store4(i64 [[TMP1]])
217 ; CALLS-NEXT: store <1 x i32> zeroinitializer, ptr [[P]], align 4
218 ; CALLS-NEXT: ret void
220 store <1 x i32> zeroinitializer, ptr %p
; Store counterpart of load.v2i32: 8-byte access, no slow path; CALLS mode
; uses @__asan_store8.
224 define void @store.v2i32(ptr %p) sanitize_address {
225 ; CHECK-LABEL: @store.v2i32(
226 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
227 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
228 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
229 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
230 ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
231 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
232 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
234 ; CHECK-NEXT: call void @__asan_report_store8(i64 [[TMP1]]) #[[ATTR4]]
235 ; CHECK-NEXT: unreachable
237 ; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
238 ; CHECK-NEXT: ret void
240 ; CALLS-LABEL: @store.v2i32(
241 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
242 ; CALLS-NEXT: call void @__asan_store8(i64 [[TMP1]])
243 ; CALLS-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
244 ; CALLS-NEXT: ret void
246 store <2 x i32> zeroinitializer, ptr %p
; Store counterpart of load.v4i32: 16-byte access checked via an i16 shadow
; load; CALLS mode uses @__asan_store16.
250 define void @store.v4i32(ptr %p) sanitize_address {
251 ; CHECK-LABEL: @store.v4i32(
252 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
253 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
254 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
255 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
256 ; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP4]], align 2
257 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i16 [[TMP5]], 0
258 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
260 ; CHECK-NEXT: call void @__asan_report_store16(i64 [[TMP1]]) #[[ATTR4]]
261 ; CHECK-NEXT: unreachable
263 ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[P]], align 16
264 ; CHECK-NEXT: ret void
266 ; CALLS-LABEL: @store.v4i32(
267 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
268 ; CALLS-NEXT: call void @__asan_store16(i64 [[TMP1]])
269 ; CALLS-NEXT: store <4 x i32> zeroinitializer, ptr [[P]], align 16
270 ; CALLS-NEXT: ret void
272 store <4 x i32> zeroinitializer, ptr %p
; Store counterpart of load.v8i32: 32 bytes, first and last byte checked;
; reporting via @__asan_report_store_n, CALLS mode via @__asan_storeN.
276 define void @store.v8i32(ptr %p) sanitize_address {
277 ; CHECK-LABEL: @store.v8i32(
278 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
279 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 31
280 ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
281 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
282 ; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
283 ; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
284 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
285 ; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
286 ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
287 ; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
289 ; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
290 ; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
291 ; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
292 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
294 ; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP4]], i64 32) #[[ATTR4]]
295 ; CHECK-NEXT: unreachable
297 ; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
298 ; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
299 ; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
300 ; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
301 ; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
302 ; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
303 ; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
305 ; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
306 ; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
307 ; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
308 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
310 ; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP16]], i64 32) #[[ATTR4]]
311 ; CHECK-NEXT: unreachable
313 ; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr [[P]], align 32
314 ; CHECK-NEXT: ret void
316 ; CALLS-LABEL: @store.v8i32(
317 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
318 ; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP1]], i64 32)
319 ; CALLS-NEXT: store <8 x i32> zeroinitializer, ptr [[P]], align 32
320 ; CALLS-NEXT: ret void
322 store <8 x i32> zeroinitializer, ptr %p
; Store counterpart of load.v16i32: 64 bytes, first/last-byte checks with
; size 64 passed to @__asan_report_store_n / @__asan_storeN.
326 define void @store.v16i32(ptr %p) sanitize_address {
327 ; CHECK-LABEL: @store.v16i32(
328 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
329 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 63
330 ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
331 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
332 ; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
333 ; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
334 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
335 ; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
336 ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
337 ; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
339 ; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
340 ; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
341 ; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
342 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
344 ; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP4]], i64 64) #[[ATTR4]]
345 ; CHECK-NEXT: unreachable
347 ; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
348 ; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
349 ; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
350 ; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
351 ; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
352 ; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
353 ; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
355 ; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
356 ; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
357 ; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
358 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
360 ; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP16]], i64 64) #[[ATTR4]]
361 ; CHECK-NEXT: unreachable
363 ; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr [[P]], align 64
364 ; CHECK-NEXT: ret void
366 ; CALLS-LABEL: @store.v16i32(
367 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
368 ; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP1]], i64 64)
369 ; CALLS-NEXT: store <16 x i32> zeroinitializer, ptr [[P]], align 64
370 ; CALLS-NEXT: ret void
372 store <16 x i32> zeroinitializer, ptr %p
; Same as store.v2i32 but with an explicit `align 8` on the store in the
; input IR; instrumentation is identical to the default-alignment case.
376 define void @store.v2i32.align8(ptr %p) sanitize_address {
377 ; CHECK-LABEL: @store.v2i32.align8(
378 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
379 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
380 ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
381 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
382 ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
383 ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
384 ; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
386 ; CHECK-NEXT: call void @__asan_report_store8(i64 [[TMP1]]) #[[ATTR4]]
387 ; CHECK-NEXT: unreachable
389 ; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
390 ; CHECK-NEXT: ret void
392 ; CALLS-LABEL: @store.v2i32.align8(
393 ; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
394 ; CALLS-NEXT: call void @__asan_store8(i64 [[TMP1]])
395 ; CALLS-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
396 ; CALLS-NEXT: ret void
398 store <2 x i32> zeroinitializer, ptr %p, align 8
; Scalable vector <vscale x 1 x i32>: the access size is computed at runtime
; from @llvm.vscale.i64, and both first and last byte of the range are checked;
; the dynamic size is passed to @__asan_report_load_n / @__asan_loadN.
402 define void @load.nxv1i32(ptr %p) sanitize_address {
403 ; CHECK-LABEL: @load.nxv1i32(
404 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
405 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
406 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
407 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
408 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
409 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
410 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
411 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
412 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
413 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
414 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
415 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
416 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
417 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
419 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
420 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
421 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
422 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
424 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
425 ; CHECK-NEXT: unreachable
427 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
428 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
429 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
430 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
431 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
432 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
433 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
435 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
436 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
437 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
438 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
440 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
441 ; CHECK-NEXT: unreachable
443 ; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 1 x i32>, ptr [[P]], align 4
444 ; CHECK-NEXT: ret void
446 ; CALLS-LABEL: @load.nxv1i32(
447 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
448 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
449 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
450 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
451 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
452 ; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 1 x i32>, ptr [[P]], align 4
453 ; CALLS-NEXT: ret void
455 load <vscale x 1 x i32>, ptr %p
; <vscale x 2 x i32>: same dynamic-size scheme as load.nxv1i32 with the
; bit-width multiplier doubled to 64.
459 define void @load.nxv2i32(ptr %p) sanitize_address {
460 ; CHECK-LABEL: @load.nxv2i32(
461 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
462 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
463 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
464 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
465 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
466 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
467 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
468 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
469 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
470 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
471 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
472 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
473 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
474 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
476 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
477 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
478 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
479 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
481 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
482 ; CHECK-NEXT: unreachable
484 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
485 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
486 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
487 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
488 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
489 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
490 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
492 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
493 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
494 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
495 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
497 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
498 ; CHECK-NEXT: unreachable
500 ; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 2 x i32>, ptr [[P]], align 8
501 ; CHECK-NEXT: ret void
503 ; CALLS-LABEL: @load.nxv2i32(
504 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
505 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
506 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
507 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
508 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
509 ; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[P]], align 8
510 ; CALLS-NEXT: ret void
512 load <vscale x 2 x i32>, ptr %p
; <vscale x 4 x i32>: same dynamic-size scheme, bit-width multiplier 128.
516 define void @load.nxv4i32(ptr %p) sanitize_address {
517 ; CHECK-LABEL: @load.nxv4i32(
518 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
519 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
520 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
521 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
522 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
523 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
524 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
525 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
526 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
527 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
528 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
529 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
530 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
531 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
533 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
534 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
535 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
536 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
538 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
539 ; CHECK-NEXT: unreachable
541 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
542 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
543 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
544 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
545 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
546 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
547 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
549 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
550 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
551 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
552 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
554 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
555 ; CHECK-NEXT: unreachable
557 ; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
558 ; CHECK-NEXT: ret void
560 ; CALLS-LABEL: @load.nxv4i32(
561 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
562 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
563 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
564 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
565 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
566 ; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
567 ; CALLS-NEXT: ret void
569 load <vscale x 4 x i32>, ptr %p
; <vscale x 8 x i32>: same dynamic-size scheme, bit-width multiplier 256.
573 define void @load.nxv8i32(ptr %p) sanitize_address {
574 ; CHECK-LABEL: @load.nxv8i32(
575 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
576 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
577 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
578 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
579 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
580 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
581 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
582 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
583 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
584 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
585 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
586 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
587 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
588 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
590 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
591 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
592 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
593 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
595 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
596 ; CHECK-NEXT: unreachable
598 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
599 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
600 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
601 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
602 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
603 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
604 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
606 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
607 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
608 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
609 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
611 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
612 ; CHECK-NEXT: unreachable
614 ; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 8 x i32>, ptr [[P]], align 32
615 ; CHECK-NEXT: ret void
617 ; CALLS-LABEL: @load.nxv8i32(
618 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
619 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
620 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
621 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
622 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
623 ; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 8 x i32>, ptr [[P]], align 32
624 ; CALLS-NEXT: ret void
626 load <vscale x 8 x i32>, ptr %p
; <vscale x 16 x i32>: same dynamic-size scheme, bit-width multiplier 512.
630 define void @load.nxv16i32(ptr %p) sanitize_address {
631 ; CHECK-LABEL: @load.nxv16i32(
632 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
633 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
634 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
635 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
636 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
637 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
638 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
639 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
640 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
641 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
642 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
643 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
644 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
645 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
647 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
648 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
649 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
650 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
652 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
653 ; CHECK-NEXT: unreachable
655 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
656 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
657 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
658 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
659 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
660 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
661 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
663 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
664 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
665 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
666 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
668 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
669 ; CHECK-NEXT: unreachable
671 ; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 16 x i32>, ptr [[P]], align 64
672 ; CHECK-NEXT: ret void
674 ; CALLS-LABEL: @load.nxv16i32(
675 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
676 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
677 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
678 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
679 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
680 ; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i32>, ptr [[P]], align 64
681 ; CALLS-NEXT: ret void
683 load <vscale x 16 x i32>, ptr %p
; Store counterpart of load.nxv1i32: runtime vscale-based size, first/last-byte
; checks, reporting via @__asan_report_store_n; CALLS mode via @__asan_storeN.
688 define void @store.nxv1i32(ptr %p) sanitize_address {
689 ; CHECK-LABEL: @store.nxv1i32(
690 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
691 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
692 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
693 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
694 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
695 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
696 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
697 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
698 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
699 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
700 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
701 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
702 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
703 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
705 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
706 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
707 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
708 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
710 ; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
711 ; CHECK-NEXT: unreachable
713 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
714 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
715 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
716 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
717 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
718 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
719 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
721 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
722 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
723 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
724 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
726 ; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
727 ; CHECK-NEXT: unreachable
729 ; CHECK-NEXT: store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
730 ; CHECK-NEXT: ret void
732 ; CALLS-LABEL: @store.nxv1i32(
733 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
734 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
735 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
736 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
737 ; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
738 ; CALLS-NEXT: store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
739 ; CALLS-NEXT: ret void
741 store <vscale x 1 x i32> zeroinitializer, ptr %p
; Same pattern for <vscale x 2 x i32>: run-time size is vscale * 64 bits
; ((vscale*64)>>3 bytes). CHECK: inline first-byte and last-byte shadow checks
; ending in __asan_report_store_n. CALLS: one __asan_storeN(addr, size) call.
define void @store.nxv2i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv2i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT: ret void
; CALLS-LABEL: @store.nxv2i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CALLS-NEXT: ret void
store <vscale x 2 x i32> zeroinitializer, ptr %p
; Same pattern for <vscale x 4 x i32>: run-time size is vscale * 128 bits
; ((vscale*128)>>3 bytes). CHECK: inline first-byte and last-byte shadow checks
; ending in __asan_report_store_n. CALLS: one __asan_storeN(addr, size) call.
define void @store.nxv4i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv4i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT: ret void
; CALLS-LABEL: @store.nxv4i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CALLS-NEXT: ret void
store <vscale x 4 x i32> zeroinitializer, ptr %p
; Same pattern for <vscale x 8 x i32>: run-time size is vscale * 256 bits
; ((vscale*256)>>3 bytes). CHECK: inline first-byte and last-byte shadow checks
; ending in __asan_report_store_n. CALLS: one __asan_storeN(addr, size) call.
define void @store.nxv8i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv8i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT: ret void
; CALLS-LABEL: @store.nxv8i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CALLS-NEXT: ret void
store <vscale x 8 x i32> zeroinitializer, ptr %p
; Same pattern for <vscale x 16 x i32>: run-time size is vscale * 512 bits
; ((vscale*512)>>3 bytes). CHECK: inline first-byte and last-byte shadow checks
; ending in __asan_report_store_n. CALLS: one __asan_storeN(addr, size) call.
define void @store.nxv16i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv16i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK-NEXT: store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT: ret void
; CALLS-LABEL: @store.nxv16i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CALLS-NEXT: ret void
store <vscale x 16 x i32> zeroinitializer, ptr %p
; Opaque external callee; @local_alloca passes its stack allocation to it, so
; the pointer escapes before the instrumented load of that alloca.
declare void @clobber(ptr)
975 define <vscale x 2 x i32> @local_alloca() sanitize_address {
976 ; CHECK-LABEL: @local_alloca(
977 ; CHECK-NEXT: [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
978 ; CHECK-NEXT: call void @clobber(ptr [[A]])
979 ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
980 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
981 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
982 ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
983 ; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
984 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
985 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
986 ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A]] to i64
987 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
988 ; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
989 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
990 ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
991 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
992 ; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
994 ; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
995 ; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
996 ; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
997 ; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
999 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
1000 ; CHECK-NEXT: unreachable
1002 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
1003 ; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
1004 ; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
1005 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
1006 ; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
1007 ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
1008 ; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
1010 ; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
1011 ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
1012 ; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
1013 ; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
1015 ; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
1016 ; CHECK-NEXT: unreachable
1018 ; CHECK-NEXT: [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
1019 ; CHECK-NEXT: ret <vscale x 2 x i32> [[RES]]
1021 ; CALLS-LABEL: @local_alloca(
1022 ; CALLS-NEXT: [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
1023 ; CALLS-NEXT: call void @clobber(ptr [[A]])
1024 ; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
1025 ; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
1026 ; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
1027 ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
1028 ; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
1029 ; CALLS-NEXT: [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
1030 ; CALLS-NEXT: ret <vscale x 2 x i32> [[RES]]
1032 %a = alloca <vscale x 2 x i32>
1033 call void @clobber(ptr %a)
1034 %res = load <vscale x 2 x i32>, ptr %a
1035 ret <vscale x 2 x i32> %res