1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -passes=nsan -nsan-shadow-type-mapping=dqq -S %s | FileCheck %s
3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
5 ; Tests with memory manipulation (llvm.memcpy, plain memcpy, llvm.memset, ...).
8 declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
; llvm.memcpy with a constant size of 4, 8 or 16 bytes is instrumented with the
; size-specialized shadow-copy callback (__nsan_copy_4/_8/_16); any other
; constant size (15 here) falls back to the generic
; __nsan_copy_values(dst, src, size). The original memcpy is kept after the
; shadow copy.
10 define void @call_memcpy_intrinsics(i8* nonnull align 8 dereferenceable(16) %a, i8* nonnull align 8 dereferenceable(16) %b) sanitize_numerical_stability {
11 ; CHECK-LABEL: @call_memcpy_intrinsics(
13 ; CHECK-NEXT: call void @__nsan_copy_4(ptr [[A:%.*]], ptr [[B:%.*]])
14 ; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], ptr nonnull align 8 dereferenceable(16) [[B]], i64 4, i1 false)
15 ; CHECK-NEXT: call void @__nsan_copy_8(ptr [[A:%.*]], ptr [[B:%.*]])
16 ; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], ptr nonnull align 8 dereferenceable(16) [[B]], i64 8, i1 false)
17 ; CHECK-NEXT: call void @__nsan_copy_16(ptr [[A:%.*]], ptr [[B:%.*]])
18 ; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], ptr nonnull align 8 dereferenceable(16) [[B]], i64 16, i1 false)
19 ; CHECK-NEXT: call void @__nsan_copy_values(ptr [[A:%.*]], ptr [[B:%.*]], i64 15)
20 ; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], ptr nonnull align 8 dereferenceable(16) [[B]], i64 15, i1 false)
21 ; CHECK-NEXT: ret void
24 tail call void @llvm.memcpy.p0i8.p0i8.i64(ptr nonnull align 8 dereferenceable(16) %a, ptr nonnull align 8 dereferenceable(16) %b, i64 4, i1 false)
25 tail call void @llvm.memcpy.p0i8.p0i8.i64(ptr nonnull align 8 dereferenceable(16) %a, ptr nonnull align 8 dereferenceable(16) %b, i64 8, i1 false)
26 tail call void @llvm.memcpy.p0i8.p0i8.i64(ptr nonnull align 8 dereferenceable(16) %a, ptr nonnull align 8 dereferenceable(16) %b, i64 16, i1 false)
27 tail call void @llvm.memcpy.p0i8.p0i8.i64(ptr nonnull align 8 dereferenceable(16) %a, ptr nonnull align 8 dereferenceable(16) %b, i64 15, i1 false)
31 declare dso_local i8* @memcpy(i8*, i8*, i64) local_unnamed_addr
; A call to the plain libc memcpy function (not the intrinsic) is left
; uninstrumented by the pass: the checks show only the original call and ret,
; with no shadow-copy callback inserted.
33 define void @call_memcpy(i8* nonnull align 8 dereferenceable(16) %a, i8* nonnull align 8 dereferenceable(16) %b) sanitize_numerical_stability {
34 ; CHECK-LABEL: @call_memcpy(
36 ; CHECK-NEXT: [[TMP0:%.*]] = tail call ptr @memcpy(ptr nonnull align 8 dereferenceable(16) [[A:%.*]], ptr nonnull align 8 dereferenceable(16) [[B:%.*]], i64 16) #[[ATTR3:[0-9]+]]
37 ; CHECK-NEXT: ret void
40 tail call i8* @memcpy(ptr nonnull align 8 dereferenceable(16) %a, ptr nonnull align 8 dereferenceable(16) %b, i64 16)
; llvm.memset with a constant size of 4, 8 or 16 bytes is instrumented with the
; size-specialized __nsan_set_value_unknown_4/_8/_16 callback (the shadow for
; the range becomes "unknown"); any other constant size (15 here) uses the
; generic __nsan_set_value_unknown(ptr, size).
44 define void @call_memset_intrinsics(i8* nonnull align 8 dereferenceable(16) %a) sanitize_numerical_stability {
45 ; CHECK-LABEL: @call_memset_intrinsics(
47 ; CHECK-NEXT: call void @__nsan_set_value_unknown_4(ptr [[A:%.*]])
48 ; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], i8 0, i64 4, i1 false)
49 ; CHECK-NEXT: call void @__nsan_set_value_unknown_8(ptr [[A:%.*]])
50 ; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], i8 0, i64 8, i1 false)
51 ; CHECK-NEXT: call void @__nsan_set_value_unknown_16(ptr [[A:%.*]])
52 ; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], i8 0, i64 16, i1 false)
53 ; CHECK-NEXT: call void @__nsan_set_value_unknown(ptr [[A:%.*]], i64 15)
54 ; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) [[A]], i8 0, i64 15, i1 false)
55 ; CHECK-NEXT: ret void
58 tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) %a, i8 0, i64 4, i1 false)
59 tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) %a, i8 0, i64 8, i1 false)
60 tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) %a, i8 0, i64 16, i1 false)
61 tail call void @llvm.memset.p0.i64(ptr nonnull align 8 dereferenceable(16) %a, i8 0, i64 15, i1 false)
; A float load/store pair. The load gets a shadow load: if
; __nsan_get_shadow_ptr_for_float_load returns non-null the double shadow is
; loaded from it, otherwise the shadow is recomputed by fpext'ing the
; application value; a phi merges the two. The store is checked with
; __nsan_internal_check_float_d, and if the check reports a mismatch (result
; == 1) the shadow is resynchronized to fpext of the stored float before being
; written to the shadow store pointer (with the dqq mapping, a float's shadow
; is a double).
65 define void @transfer_float(float* %dst, float* %src) sanitize_numerical_stability {
66 ; CHECK-LABEL: @transfer_float(
68 ; CHECK-NEXT: [[T:%.*]] = load float, ptr [[SRC:%.*]], align 4
69 ; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[SRC]], i64 1)
70 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[TMP0]], null
71 ; CHECK-NEXT: br i1 [[TMP1]], label [[TMP4:%.*]], label [[TMP2:%.*]]
73 ; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP0]], align 1
74 ; CHECK-NEXT: br label [[TMP6:%.*]]
76 ; CHECK-NEXT: [[TMP5:%.*]] = fpext float [[T]] to double
77 ; CHECK-NEXT: br label [[TMP6]]
79 ; CHECK-NEXT: [[TMP7:%.*]] = phi double [ [[TMP3]], [[TMP2]] ], [ [[TMP5]], [[TMP4]] ]
80 ; CHECK-NEXT: [[TMP8:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[DST:%.*]], i64 1)
81 ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[DST]] to i64
82 ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @__nsan_internal_check_float_d(float [[T]], double [[TMP7]], i32 4, i64 [[TMP9]])
83 ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP10]], 1
84 ; CHECK-NEXT: [[TMP12:%.*]] = fpext float [[T]] to double
85 ; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[TMP11]], double [[TMP12]], double [[TMP7]]
86 ; CHECK-NEXT: store double [[TMP13]], ptr [[TMP8]], align 1
87 ; CHECK-NEXT: store float [[T]], ptr [[DST]], align 1
88 ; CHECK-NEXT: ret void
91 %t = load float, ptr %src
92 store float %t, ptr %dst, align 1
; A non-FP (i32) load/store pair. No float checking is done; instead the pass
; transfers the raw shadow verbatim: it loads the shadow-type word (i32 here)
; via __nsan_internal_get_raw_shadow_type_ptr and the shadow payload (i64 for a
; 4-byte value with the dqq mapping) via __nsan_internal_get_raw_shadow_ptr,
; then stores both to the destination's shadow after the application store.
96 define void @transfer_non_float(i32* %dst, i32* %src) sanitize_numerical_stability {
97 ; CHECK-LABEL: @transfer_non_float(
99 ; CHECK-NEXT: [[T:%.*]] = load i32, ptr [[SRC:%.*]], align 4
100 ; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[SRC]])
101 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 1
102 ; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[SRC]])
103 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 1
104 ; CHECK-NEXT: store i32 [[T]], ptr [[DST:%.*]], align 1
105 ; CHECK-NEXT: [[TMP4:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[DST]])
106 ; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 1
107 ; CHECK-NEXT: [[TMP5:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[DST]])
108 ; CHECK-NEXT: store i64 [[TMP3]], ptr [[TMP5]], align 1
109 ; CHECK-NEXT: ret void
112 %t = load i32, ptr %src
113 store i32 %t, ptr %dst, align 1
; Loading/storing an aggregate ([2 x float]) is also handled as a raw shadow
; transfer (like transfer_non_float), not as per-element float shadow ops: an
; i64 shadow-type word and an i128 shadow payload (2 floats x double shadow
; under dqq) are copied alongside the application value.
117 define void @transfer_array([2 x float]* %a) sanitize_numerical_stability {
118 ; CHECK-LABEL: @transfer_array(
120 ; CHECK-NEXT: [[B:%.*]] = load [2 x float], ptr [[A:%.*]], align 1
121 ; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[A]])
122 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 1
123 ; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[A]])
124 ; CHECK-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP2]], align 1
125 ; CHECK-NEXT: store [2 x float] [[B]], ptr [[A]], align 1
126 ; CHECK-NEXT: [[TMP4:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[A]])
127 ; CHECK-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 1
128 ; CHECK-NEXT: [[TMP5:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[A]])
129 ; CHECK-NEXT: store i128 [[TMP3]], ptr [[TMP5]], align 1
130 ; CHECK-NEXT: ret void
133 %b = load [2 x float], ptr %a, align 1
134 store [2 x float] %b, ptr %a, align 1
; Swapping two i64 values through memory: each load pulls the raw shadow type
; word (i64) and shadow payload (i128) of its source, and each store writes the
; shadow captured from the *other* pointer, so the shadows are swapped along
; with the application values. No float checks are emitted for untyped data.
138 define void @swap_untyped1(i64* nonnull align 8 %p, i64* nonnull align 8 %q) sanitize_numerical_stability {
139 ; CHECK-LABEL: @swap_untyped1(
140 ; CHECK-NEXT: [[QV:%.*]] = load i64, ptr [[Q:%.*]], align 8
141 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[Q]])
142 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 1
143 ; CHECK-NEXT: [[TMP3:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[Q]])
144 ; CHECK-NEXT: [[TMP4:%.*]] = load i128, ptr [[TMP3]], align 1
145 ; CHECK-NEXT: [[PV:%.*]] = load i64, ptr [[P:%.*]], align 8
146 ; CHECK-NEXT: [[TMP5:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[P]])
147 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 1
148 ; CHECK-NEXT: [[TMP7:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[P]])
149 ; CHECK-NEXT: [[TMP8:%.*]] = load i128, ptr [[TMP7]], align 1
150 ; CHECK-NEXT: store i64 [[PV]], ptr [[Q]], align 8
151 ; CHECK-NEXT: [[TMP9:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[Q]])
152 ; CHECK-NEXT: store i64 [[TMP6]], ptr [[TMP9]], align 1
153 ; CHECK-NEXT: [[TMP10:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[Q]])
154 ; CHECK-NEXT: store i128 [[TMP8]], ptr [[TMP10]], align 1
155 ; CHECK-NEXT: store i64 [[QV]], ptr [[P]], align 8
156 ; CHECK-NEXT: [[TMP11:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[P]])
157 ; CHECK-NEXT: store i64 [[TMP2]], ptr [[TMP11]], align 1
158 ; CHECK-NEXT: [[TMP12:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[P]])
159 ; CHECK-NEXT: store i128 [[TMP4]], ptr [[TMP12]], align 1
160 ; CHECK-NEXT: ret void
162 %qv = load i64, ptr %q
163 %pv = load i64, ptr %p
164 store i64 %pv, ptr %q, align 8
165 store i64 %qv, ptr %p, align 8
169 ; Same as swap_untyped1, but the load/stores are in the opposite order.
; Mirror of swap_untyped1 with the loads in the opposite order (p before q);
; the instrumentation shape is identical, only the capture order of the raw
; shadow values differs.
170 define void @swap_untyped2(i64* nonnull align 8 %p, i64* nonnull align 8 %q) sanitize_numerical_stability {
171 ; CHECK-LABEL: @swap_untyped2(
172 ; CHECK-NEXT: [[PV:%.*]] = load i64, ptr [[P:%.*]], align 8
173 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[P]])
174 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 1
175 ; CHECK-NEXT: [[TMP3:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[P]])
176 ; CHECK-NEXT: [[TMP4:%.*]] = load i128, ptr [[TMP3]], align 1
177 ; CHECK-NEXT: [[QV:%.*]] = load i64, ptr [[Q:%.*]], align 8
178 ; CHECK-NEXT: [[TMP5:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[Q]])
179 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 1
180 ; CHECK-NEXT: [[TMP7:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[Q]])
181 ; CHECK-NEXT: [[TMP8:%.*]] = load i128, ptr [[TMP7]], align 1
182 ; CHECK-NEXT: store i64 [[PV]], ptr [[Q]], align 8
183 ; CHECK-NEXT: [[TMP9:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[Q]])
184 ; CHECK-NEXT: store i64 [[TMP2]], ptr [[TMP9]], align 1
185 ; CHECK-NEXT: [[TMP10:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[Q]])
186 ; CHECK-NEXT: store i128 [[TMP4]], ptr [[TMP10]], align 1
187 ; CHECK-NEXT: store i64 [[QV]], ptr [[P]], align 8
188 ; CHECK-NEXT: [[TMP11:%.*]] = call ptr @__nsan_internal_get_raw_shadow_type_ptr(ptr [[P]])
189 ; CHECK-NEXT: store i64 [[TMP6]], ptr [[TMP11]], align 1
190 ; CHECK-NEXT: [[TMP12:%.*]] = call ptr @__nsan_internal_get_raw_shadow_ptr(ptr [[P]])
191 ; CHECK-NEXT: store i128 [[TMP8]], ptr [[TMP12]], align 1
192 ; CHECK-NEXT: ret void
194 %pv = load i64, ptr %p
195 %qv = load i64, ptr %q
196 store i64 %pv, ptr %q, align 8
197 store i64 %qv, ptr %p, align 8
; Swapping two floats through memory. Each load gets the shadow-load pattern
; (shadow ptr lookup, null-check, double shadow load or fpext fallback, phi),
; and each store is preceded by a __nsan_internal_check_float_d check, with the
; shadow resynchronized (select on check == 1) before being stored. The shadow
; of p's value ends up at q and vice versa.
201 define void @swap_ft1(float* nonnull align 8 %p, float* nonnull align 8 %q) sanitize_numerical_stability {
202 ; CHECK-LABEL: @swap_ft1(
203 ; CHECK-NEXT: [[QV:%.*]] = load float, ptr [[Q:%.*]], align 4
204 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[Q]], i64 1)
205 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP1]], null
206 ; CHECK-NEXT: br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
208 ; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[TMP1]], align 1
209 ; CHECK-NEXT: br label [[TMP7:%.*]]
211 ; CHECK-NEXT: [[TMP6:%.*]] = fpext float [[QV]] to double
212 ; CHECK-NEXT: br label [[TMP7]]
214 ; CHECK-NEXT: [[TMP8:%.*]] = phi double [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
215 ; CHECK-NEXT: [[PV:%.*]] = load float, ptr [[P:%.*]], align 4
216 ; CHECK-NEXT: [[TMP9:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[P]], i64 1)
217 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq ptr [[TMP9]], null
218 ; CHECK-NEXT: br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP11:%.*]]
220 ; CHECK-NEXT: [[TMP12:%.*]] = load double, ptr [[TMP9]], align 1
221 ; CHECK-NEXT: br label [[TMP15:%.*]]
223 ; CHECK-NEXT: [[TMP14:%.*]] = fpext float [[PV]] to double
224 ; CHECK-NEXT: br label [[TMP15]]
226 ; CHECK-NEXT: [[TMP16:%.*]] = phi double [ [[TMP12]], [[TMP11]] ], [ [[TMP14]], [[TMP13]] ]
227 ; CHECK-NEXT: [[TMP17:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[Q]], i64 1)
228 ; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[Q]] to i64
229 ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__nsan_internal_check_float_d(float [[PV]], double [[TMP16]], i32 4, i64 [[TMP18]])
230 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i32 [[TMP19]], 1
231 ; CHECK-NEXT: [[TMP21:%.*]] = fpext float [[PV]] to double
232 ; CHECK-NEXT: [[TMP22:%.*]] = select i1 [[TMP20]], double [[TMP21]], double [[TMP16]]
233 ; CHECK-NEXT: store double [[TMP22]], ptr [[TMP17]], align 1
234 ; CHECK-NEXT: store float [[PV]], ptr [[Q]], align 8
235 ; CHECK-NEXT: [[TMP23:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[P]], i64 1)
236 ; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[P]] to i64
237 ; CHECK-NEXT: [[TMP25:%.*]] = call i32 @__nsan_internal_check_float_d(float [[QV]], double [[TMP8]], i32 4, i64 [[TMP24]])
238 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[TMP25]], 1
239 ; CHECK-NEXT: [[TMP27:%.*]] = fpext float [[QV]] to double
240 ; CHECK-NEXT: [[TMP28:%.*]] = select i1 [[TMP26]], double [[TMP27]], double [[TMP8]]
241 ; CHECK-NEXT: store double [[TMP28]], ptr [[TMP23]], align 1
242 ; CHECK-NEXT: store float [[QV]], ptr [[P]], align 8
243 ; CHECK-NEXT: ret void
245 %qv = load float, ptr %q
246 %pv = load float, ptr %p
247 store float %pv, ptr %q, align 8
248 store float %qv, ptr %p, align 8
252 ; Same as swap_ft1, but the load/stores are in the opposite order.
; Mirror of swap_ft1 with the loads in the opposite order (p before q); the
; instrumentation shape is identical, only which phi feeds which checked store
; differs.
253 define void @swap_ft2(float* nonnull align 8 %p, float* nonnull align 8 %q) sanitize_numerical_stability {
254 ; CHECK-LABEL: @swap_ft2(
255 ; CHECK-NEXT: [[PV:%.*]] = load float, ptr [[P:%.*]], align 4
256 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[P]], i64 1)
257 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP1]], null
258 ; CHECK-NEXT: br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
260 ; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[TMP1]], align 1
261 ; CHECK-NEXT: br label [[TMP7:%.*]]
263 ; CHECK-NEXT: [[TMP6:%.*]] = fpext float [[PV]] to double
264 ; CHECK-NEXT: br label [[TMP7]]
266 ; CHECK-NEXT: [[TMP8:%.*]] = phi double [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
267 ; CHECK-NEXT: [[QV:%.*]] = load float, ptr [[Q:%.*]], align 4
268 ; CHECK-NEXT: [[TMP9:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[Q]], i64 1)
269 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq ptr [[TMP9]], null
270 ; CHECK-NEXT: br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP11:%.*]]
272 ; CHECK-NEXT: [[TMP12:%.*]] = load double, ptr [[TMP9]], align 1
273 ; CHECK-NEXT: br label [[TMP15:%.*]]
275 ; CHECK-NEXT: [[TMP14:%.*]] = fpext float [[QV]] to double
276 ; CHECK-NEXT: br label [[TMP15]]
278 ; CHECK-NEXT: [[TMP16:%.*]] = phi double [ [[TMP12]], [[TMP11]] ], [ [[TMP14]], [[TMP13]] ]
279 ; CHECK-NEXT: [[TMP17:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[Q]], i64 1)
280 ; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[Q]] to i64
281 ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__nsan_internal_check_float_d(float [[PV]], double [[TMP8]], i32 4, i64 [[TMP18]])
282 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i32 [[TMP19]], 1
283 ; CHECK-NEXT: [[TMP21:%.*]] = fpext float [[PV]] to double
284 ; CHECK-NEXT: [[TMP22:%.*]] = select i1 [[TMP20]], double [[TMP21]], double [[TMP8]]
285 ; CHECK-NEXT: store double [[TMP22]], ptr [[TMP17]], align 1
286 ; CHECK-NEXT: store float [[PV]], ptr [[Q]], align 8
287 ; CHECK-NEXT: [[TMP23:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[P]], i64 1)
288 ; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[P]] to i64
289 ; CHECK-NEXT: [[TMP25:%.*]] = call i32 @__nsan_internal_check_float_d(float [[QV]], double [[TMP16]], i32 4, i64 [[TMP24]])
290 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[TMP25]], 1
291 ; CHECK-NEXT: [[TMP27:%.*]] = fpext float [[QV]] to double
292 ; CHECK-NEXT: [[TMP28:%.*]] = select i1 [[TMP26]], double [[TMP27]], double [[TMP16]]
293 ; CHECK-NEXT: store double [[TMP28]], ptr [[TMP23]], align 1
294 ; CHECK-NEXT: store float [[QV]], ptr [[P]], align 8
295 ; CHECK-NEXT: ret void
297 %pv = load float, ptr %p
298 %qv = load float, ptr %q
299 store float %pv, ptr %q, align 8
300 store float %qv, ptr %p, align 8
; Swapping two <2 x float> vectors. Loads use the same shadow-load-or-fpext
; pattern as the scalar case but with <2 x double> shadows (i64 2 elements in
; the callback). Each store checks every element separately: extractelement on
; value and shadow, one __nsan_internal_check_float_d per lane, the lane
; results OR'ed together, and the whole vector shadow resynchronized via a
; single select if any lane reported a mismatch.
304 define void @swap_vectorft1(<2 x float>* nonnull align 16 %p, <2 x float>* nonnull align 16 %q) sanitize_numerical_stability {
305 ; CHECK-LABEL: @swap_vectorft1(
306 ; CHECK-NEXT: [[QV:%.*]] = load <2 x float>, ptr [[Q:%.*]], align 8
307 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[Q]], i64 2)
308 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP1]], null
309 ; CHECK-NEXT: br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
311 ; CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, ptr [[TMP1]], align 1
312 ; CHECK-NEXT: br label [[TMP7:%.*]]
314 ; CHECK-NEXT: [[TMP6:%.*]] = fpext <2 x float> [[QV]] to <2 x double>
315 ; CHECK-NEXT: br label [[TMP7]]
317 ; CHECK-NEXT: [[TMP8:%.*]] = phi <2 x double> [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
318 ; CHECK-NEXT: [[PV:%.*]] = load <2 x float>, ptr [[P:%.*]], align 8
319 ; CHECK-NEXT: [[TMP9:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[P]], i64 2)
320 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq ptr [[TMP9]], null
321 ; CHECK-NEXT: br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP11:%.*]]
323 ; CHECK-NEXT: [[TMP12:%.*]] = load <2 x double>, ptr [[TMP9]], align 1
324 ; CHECK-NEXT: br label [[TMP15:%.*]]
326 ; CHECK-NEXT: [[TMP14:%.*]] = fpext <2 x float> [[PV]] to <2 x double>
327 ; CHECK-NEXT: br label [[TMP15]]
329 ; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x double> [ [[TMP12]], [[TMP11]] ], [ [[TMP14]], [[TMP13]] ]
330 ; CHECK-NEXT: [[TMP17:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[Q]], i64 2)
331 ; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x float> [[PV]], i64 0
332 ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x double> [[TMP16]], i64 0
333 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[Q]] to i64
334 ; CHECK-NEXT: [[TMP21:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP18]], double [[TMP19]], i32 4, i64 [[TMP20]])
335 ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x float> [[PV]], i64 1
336 ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x double> [[TMP16]], i64 1
337 ; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[Q]] to i64
338 ; CHECK-NEXT: [[TMP25:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP22]], double [[TMP23]], i32 4, i64 [[TMP24]])
339 ; CHECK-NEXT: [[TMP26:%.*]] = or i32 [[TMP21]], [[TMP25]]
340 ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i32 [[TMP26]], 1
341 ; CHECK-NEXT: [[TMP28:%.*]] = fpext <2 x float> [[PV]] to <2 x double>
342 ; CHECK-NEXT: [[TMP29:%.*]] = select i1 [[TMP27]], <2 x double> [[TMP28]], <2 x double> [[TMP16]]
343 ; CHECK-NEXT: store <2 x double> [[TMP29]], ptr [[TMP17]], align 1
344 ; CHECK-NEXT: store <2 x float> [[PV]], ptr [[Q]], align 16
345 ; CHECK-NEXT: [[TMP30:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[P]], i64 2)
346 ; CHECK-NEXT: [[TMP31:%.*]] = extractelement <2 x float> [[QV]], i64 0
347 ; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x double> [[TMP8]], i64 0
348 ; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[P]] to i64
349 ; CHECK-NEXT: [[TMP34:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP31]], double [[TMP32]], i32 4, i64 [[TMP33]])
350 ; CHECK-NEXT: [[TMP35:%.*]] = extractelement <2 x float> [[QV]], i64 1
351 ; CHECK-NEXT: [[TMP36:%.*]] = extractelement <2 x double> [[TMP8]], i64 1
352 ; CHECK-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[P]] to i64
353 ; CHECK-NEXT: [[TMP38:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP35]], double [[TMP36]], i32 4, i64 [[TMP37]])
354 ; CHECK-NEXT: [[TMP39:%.*]] = or i32 [[TMP34]], [[TMP38]]
355 ; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i32 [[TMP39]], 1
356 ; CHECK-NEXT: [[TMP41:%.*]] = fpext <2 x float> [[QV]] to <2 x double>
357 ; CHECK-NEXT: [[TMP42:%.*]] = select i1 [[TMP40]], <2 x double> [[TMP41]], <2 x double> [[TMP8]]
358 ; CHECK-NEXT: store <2 x double> [[TMP42]], ptr [[TMP30]], align 1
359 ; CHECK-NEXT: store <2 x float> [[QV]], ptr [[P]], align 16
360 ; CHECK-NEXT: ret void
362 %qv = load <2 x float>, ptr %q
363 %pv = load <2 x float>, ptr %p
364 store <2 x float> %pv, ptr %q, align 16
365 store <2 x float> %qv, ptr %p, align 16
369 ; Same as swap_vectorft1, but the load/stores are in the opposite order.
; Mirror of swap_vectorft1 with the loads in the opposite order (p before q);
; same per-element check-and-resync instrumentation, only the pairing of phis
; with the checked stores differs.
370 define void @swap_vectorft2(<2 x float>* nonnull align 16 %p, <2 x float>* nonnull align 16 %q) sanitize_numerical_stability {
371 ; CHECK-LABEL: @swap_vectorft2(
372 ; CHECK-NEXT: [[PV:%.*]] = load <2 x float>, ptr [[P:%.*]], align 8
373 ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[P]], i64 2)
374 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP1]], null
375 ; CHECK-NEXT: br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
377 ; CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, ptr [[TMP1]], align 1
378 ; CHECK-NEXT: br label [[TMP7:%.*]]
380 ; CHECK-NEXT: [[TMP6:%.*]] = fpext <2 x float> [[PV]] to <2 x double>
381 ; CHECK-NEXT: br label [[TMP7]]
383 ; CHECK-NEXT: [[TMP8:%.*]] = phi <2 x double> [ [[TMP4]], [[TMP3]] ], [ [[TMP6]], [[TMP5]] ]
384 ; CHECK-NEXT: [[QV:%.*]] = load <2 x float>, ptr [[Q:%.*]], align 8
385 ; CHECK-NEXT: [[TMP9:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_load(ptr [[Q]], i64 2)
386 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq ptr [[TMP9]], null
387 ; CHECK-NEXT: br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP11:%.*]]
389 ; CHECK-NEXT: [[TMP12:%.*]] = load <2 x double>, ptr [[TMP9]], align 1
390 ; CHECK-NEXT: br label [[TMP15:%.*]]
392 ; CHECK-NEXT: [[TMP14:%.*]] = fpext <2 x float> [[QV]] to <2 x double>
393 ; CHECK-NEXT: br label [[TMP15]]
395 ; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x double> [ [[TMP12]], [[TMP11]] ], [ [[TMP14]], [[TMP13]] ]
396 ; CHECK-NEXT: [[TMP17:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[Q]], i64 2)
397 ; CHECK-NEXT: [[TMP18:%.*]] = extractelement <2 x float> [[PV]], i64 0
398 ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x double> [[TMP8]], i64 0
399 ; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[Q]] to i64
400 ; CHECK-NEXT: [[TMP21:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP18]], double [[TMP19]], i32 4, i64 [[TMP20]])
401 ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x float> [[PV]], i64 1
402 ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x double> [[TMP8]], i64 1
403 ; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[Q]] to i64
404 ; CHECK-NEXT: [[TMP25:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP22]], double [[TMP23]], i32 4, i64 [[TMP24]])
405 ; CHECK-NEXT: [[TMP26:%.*]] = or i32 [[TMP21]], [[TMP25]]
406 ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i32 [[TMP26]], 1
407 ; CHECK-NEXT: [[TMP28:%.*]] = fpext <2 x float> [[PV]] to <2 x double>
408 ; CHECK-NEXT: [[TMP29:%.*]] = select i1 [[TMP27]], <2 x double> [[TMP28]], <2 x double> [[TMP8]]
409 ; CHECK-NEXT: store <2 x double> [[TMP29]], ptr [[TMP17]], align 1
410 ; CHECK-NEXT: store <2 x float> [[PV]], ptr [[Q]], align 16
411 ; CHECK-NEXT: [[TMP30:%.*]] = call ptr @__nsan_get_shadow_ptr_for_float_store(ptr [[P]], i64 2)
412 ; CHECK-NEXT: [[TMP31:%.*]] = extractelement <2 x float> [[QV]], i64 0
413 ; CHECK-NEXT: [[TMP32:%.*]] = extractelement <2 x double> [[TMP16]], i64 0
414 ; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[P]] to i64
415 ; CHECK-NEXT: [[TMP34:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP31]], double [[TMP32]], i32 4, i64 [[TMP33]])
416 ; CHECK-NEXT: [[TMP35:%.*]] = extractelement <2 x float> [[QV]], i64 1
417 ; CHECK-NEXT: [[TMP36:%.*]] = extractelement <2 x double> [[TMP16]], i64 1
418 ; CHECK-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[P]] to i64
419 ; CHECK-NEXT: [[TMP38:%.*]] = call i32 @__nsan_internal_check_float_d(float [[TMP35]], double [[TMP36]], i32 4, i64 [[TMP37]])
420 ; CHECK-NEXT: [[TMP39:%.*]] = or i32 [[TMP34]], [[TMP38]]
421 ; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i32 [[TMP39]], 1
422 ; CHECK-NEXT: [[TMP41:%.*]] = fpext <2 x float> [[QV]] to <2 x double>
423 ; CHECK-NEXT: [[TMP42:%.*]] = select i1 [[TMP40]], <2 x double> [[TMP41]], <2 x double> [[TMP16]]
424 ; CHECK-NEXT: store <2 x double> [[TMP42]], ptr [[TMP30]], align 1
425 ; CHECK-NEXT: store <2 x float> [[QV]], ptr [[P]], align 16
426 ; CHECK-NEXT: ret void
428 %pv = load <2 x float>, ptr %p
429 %qv = load <2 x float>, ptr %q
430 store <2 x float> %pv, ptr %q, align 16
431 store <2 x float> %qv, ptr %p, align 16