; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=asan -asan-instrumentation-with-call-threshold=100 -S \
; RUN: | FileCheck %s
; RUN: opt < %s -passes=asan -asan-instrumentation-with-call-threshold=0 -S \
; RUN: | FileCheck %s -check-prefix=CALLS

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
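; Fixed-width vector accesses of up to 16 bytes are handled like ordinary scalar
; accesses: a single inline shadow check sized to the access (4, 8, or 16 bytes),
; or a single __asan_load*/__asan_store* call under the CALLS prefix.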
define void @load.v1i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.v1i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0:![0-9]+]]
; CHECK: 7:
; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP1]], 7
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8
; CHECK-NEXT: [[TMP11:%.*]] = icmp sge i8 [[TMP10]], [[TMP5]]
; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP13]]
; CHECK: 12:
; CHECK-NEXT: call void @__asan_report_load4(i64 [[TMP1]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
; CHECK: 13:
; CHECK-NEXT: [[TMP14:%.*]] = load <1 x i32>, ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.v1i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_load4(i64 [[TMP1]])
; CALLS-NEXT: [[TMP2:%.*]] = load <1 x i32>, ptr [[P]], align 4
; CALLS-NEXT: ret void
;
  load <1 x i32>, ptr %p
  ret void
}
define void @load.v2i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.v2i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
; CHECK: 7:
; CHECK-NEXT: call void @__asan_report_load8(i64 [[TMP1]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 8:
; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.v2i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_load8(i64 [[TMP1]])
; CALLS-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr [[P]], align 8
; CALLS-NEXT: ret void
;
  load <2 x i32>, ptr %p
  ret void
}
define void @load.v4i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.v4i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP4]], align 2
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i16 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
; CHECK: 7:
; CHECK-NEXT: call void @__asan_report_load16(i64 [[TMP1]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 8:
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr [[P]], align 16
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.v4i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_load16(i64 [[TMP1]])
; CALLS-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[P]], align 16
; CALLS-NEXT: ret void
;
  load <4 x i32>, ptr %p
  ret void
}
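; Accesses wider than 16 bytes have no fixed-size check. The pass instead checks
; the first and the last byte of the accessed range and reports through the
; explicit-size __asan_report_load_n / __asan_loadN entry points.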
define void @load.v8i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.v8i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 31
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
; CHECK: 10:
; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
; CHECK: 14:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP4]], i64 32) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 15:
; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
; CHECK: 22:
; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
; CHECK: 26:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP16]], i64 32) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 27:
; CHECK-NEXT: [[TMP28:%.*]] = load <8 x i32>, ptr [[P]], align 32
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.v8i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP1]], i64 32)
; CALLS-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[P]], align 32
; CALLS-NEXT: ret void
;
  load <8 x i32>, ptr %p
  ret void
}
define void @load.v16i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.v16i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 63
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
; CHECK: 10:
; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
; CHECK: 14:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP4]], i64 64) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 15:
; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
; CHECK: 22:
; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
; CHECK: 26:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP16]], i64 64) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 27:
; CHECK-NEXT: [[TMP28:%.*]] = load <16 x i32>, ptr [[P]], align 64
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.v16i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP1]], i64 64)
; CALLS-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr [[P]], align 64
; CALLS-NEXT: ret void
;
  load <16 x i32>, ptr %p
  ret void
}
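; Stores are instrumented the same way as the loads above, using the
; __asan_report_store* / __asan_store* entry points instead.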
define void @store.v1i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.v1i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; CHECK: 7:
; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP1]], 7
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8
; CHECK-NEXT: [[TMP11:%.*]] = icmp sge i8 [[TMP10]], [[TMP5]]
; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP13]]
; CHECK: 12:
; CHECK-NEXT: call void @__asan_report_store4(i64 [[TMP1]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 13:
; CHECK-NEXT: store <1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.v1i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_store4(i64 [[TMP1]])
; CALLS-NEXT: store <1 x i32> zeroinitializer, ptr [[P]], align 4
; CALLS-NEXT: ret void
;
  store <1 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.v2i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.v2i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
; CHECK: 7:
; CHECK-NEXT: call void @__asan_report_store8(i64 [[TMP1]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 8:
; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.v2i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_store8(i64 [[TMP1]])
; CALLS-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CALLS-NEXT: ret void
;
  store <2 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.v4i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.v4i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP4]], align 2
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i16 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
; CHECK: 7:
; CHECK-NEXT: call void @__asan_report_store16(i64 [[TMP1]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 8:
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.v4i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_store16(i64 [[TMP1]])
; CALLS-NEXT: store <4 x i32> zeroinitializer, ptr [[P]], align 16
; CALLS-NEXT: ret void
;
  store <4 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.v8i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.v8i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 31
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
; CHECK: 10:
; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
; CHECK: 14:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP4]], i64 32) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 15:
; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
; CHECK: 22:
; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
; CHECK: 26:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP16]], i64 32) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 27:
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.v8i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP1]], i64 32)
; CALLS-NEXT: store <8 x i32> zeroinitializer, ptr [[P]], align 32
; CALLS-NEXT: ret void
;
  store <8 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.v16i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.v16i32(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 63
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP4]], 3
; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 17592186044416
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP8]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
; CHECK: 10:
; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP4]], 7
; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i8
; CHECK-NEXT: [[TMP13:%.*]] = icmp sge i8 [[TMP12]], [[TMP8]]
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP15]]
; CHECK: 14:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP4]], i64 64) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 15:
; CHECK-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[TMP3]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 17592186044416
; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
; CHECK-NEXT: br i1 [[TMP21]], label [[TMP22:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
; CHECK: 22:
; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
; CHECK-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
; CHECK-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP27]]
; CHECK: 26:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP16]], i64 64) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 27:
; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.v16i32(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP1]], i64 64)
; CALLS-NEXT: store <16 x i32> zeroinitializer, ptr [[P]], align 64
; CALLS-NEXT: ret void
;
  store <16 x i32> zeroinitializer, ptr %p
  ret void
}
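; An explicit `align 8` on the <2 x i32> store does not change the instrumentation:
; it is still a single 8-byte check.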
define void @store.v2i32.align8(ptr %p) sanitize_address {
; CHECK-LABEL: @store.v2i32.align8(
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3
; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], 17592186044416
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i8 [[TMP5]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP8:%.*]]
; CHECK: 7:
; CHECK-NEXT: call void @__asan_report_store8(i64 [[TMP1]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 8:
; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.v2i32.align8(
; CALLS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_store8(i64 [[TMP1]])
; CALLS-NEXT: store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CALLS-NEXT: ret void
;
  store <2 x i32> zeroinitializer, ptr %p, align 8
  ret void
}
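; For scalable vectors the access size is not a compile-time constant. The pass
; computes it as (vscale * minimum vector size in bits) / 8 via @llvm.vscale.i64
; and always uses the explicit-size report/runtime entry points.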
define void @load.nxv1i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.nxv1i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 1 x i32>, ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.nxv1i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 1 x i32>, ptr [[P]], align 4
; CALLS-NEXT: ret void
;
  load <vscale x 1 x i32>, ptr %p
  ret void
}
define void @load.nxv2i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.nxv2i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 2 x i32>, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.nxv2i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[P]], align 8
; CALLS-NEXT: ret void
;
  load <vscale x 2 x i32>, ptr %p
  ret void
}
define void @load.nxv4i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.nxv4i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.nxv4i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
; CALLS-NEXT: ret void
;
  load <vscale x 4 x i32>, ptr %p
  ret void
}
define void @load.nxv8i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.nxv8i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 8 x i32>, ptr [[P]], align 32
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.nxv8i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 8 x i32>, ptr [[P]], align 32
; CALLS-NEXT: ret void
;
  load <vscale x 8 x i32>, ptr %p
  ret void
}
define void @load.nxv16i32(ptr %p) sanitize_address {
; CHECK-LABEL: @load.nxv16i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: [[TMP32:%.*]] = load <vscale x 16 x i32>, ptr [[P]], align 64
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @load.nxv16i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i32>, ptr [[P]], align 64
; CALLS-NEXT: ret void
;
  load <vscale x 16 x i32>, ptr %p
  ret void
}
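; Scalable-vector stores get the same vscale-derived size, reported through
; __asan_report_store_n / __asan_storeN.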
define void @store.nxv1i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv1i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.nxv1i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 32
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; CALLS-NEXT: ret void
;
  store <vscale x 1 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.nxv2i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv2i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.nxv2i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CALLS-NEXT: ret void
;
  store <vscale x 2 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.nxv4i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv4i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.nxv4i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 128
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CALLS-NEXT: ret void
;
  store <vscale x 4 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.nxv8i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv8i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.nxv8i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 256
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CALLS-NEXT: ret void
;
  store <vscale x 8 x i32> zeroinitializer, ptr %p
  ret void
}
define void @store.nxv16i32(ptr %p) sanitize_address {
; CHECK-LABEL: @store.nxv16i32(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_store_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT: ret void
;
; CALLS-LABEL: @store.nxv16i32(
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 512
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CALLS-NEXT: call void @__asan_storeN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CALLS-NEXT: ret void
;
  store <vscale x 16 x i32> zeroinitializer, ptr %p
  ret void
}
declare void @clobber(ptr)
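; A load from a scalable-vector alloca: the checks show the alloca left as-is,
; while the load from it is still instrumented with the vscale-derived size.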
define <vscale x 2 x i32> @local_alloca() sanitize_address {
; CHECK-LABEL: @local_alloca(
; CHECK-NEXT: [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
; CHECK-NEXT: call void @clobber(ptr [[A]])
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
; CHECK-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
; CHECK: 14:
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP8]], 7
; CHECK-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
; CHECK: 18:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 19:
; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
; CHECK-NEXT: [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
; CHECK-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
; CHECK: 26:
; CHECK-NEXT: [[TMP27:%.*]] = and i64 [[TMP20]], 7
; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
; CHECK-NEXT: [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
; CHECK-NEXT: br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
; CHECK: 30:
; CHECK-NEXT: call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: 31:
; CHECK-NEXT: [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
; CHECK-NEXT: ret <vscale x 2 x i32> [[RES]]
;
; CALLS-LABEL: @local_alloca(
; CALLS-NEXT: [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
; CALLS-NEXT: call void @clobber(ptr [[A]])
; CALLS-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CALLS-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 64
; CALLS-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; CALLS-NEXT: call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
; CALLS-NEXT: [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
; CALLS-NEXT: ret <vscale x 2 x i32> [[RES]]
;
  %a = alloca <vscale x 2 x i32>
  call void @clobber(ptr %a)
  %res = load <vscale x 2 x i32>, ptr %a
  ret <vscale x 2 x i32> %res
}