; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s --implicit-check-not="call void @__msan_warning"
; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ADDR --implicit-check-not="call void @__msan_warning"
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ORIGINS --implicit-check-not="call void @__msan_warning"
; The kernel variant has no check lines; this run only verifies that
; -msan-kernel instrumentation of masked operations succeeds:
; RUN: opt < %s -msan-check-access-address=1 -msan-track-origins=1 -S -passes=msan -msan-kernel

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
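
; The check lines below encode the x86_64 Linux MSan mapping used by the
; instrumented IR: shadow(addr) = addr xor 87960930222080 (0x500000000000),
; and the origin slot for addr is shadow(addr) + 17592186044416
; (0x100000000000), rounded down to 4-byte alignment ('and ..., -4').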
declare void @llvm.masked.store.v4i64.p0(<4 x i64>, ptr, i32, <4 x i1>)
declare <4 x double> @llvm.masked.load.v4f64.p0(ptr, i32, <4 x i1>, <4 x double>)
declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32>, <8 x ptr>, i32, <8 x i1>)
declare <16 x float> @llvm.masked.expandload.v16f32(ptr, <16 x i1>, <16 x float>)
declare void @llvm.masked.compressstore.v16f32(<16 x float>, ptr, <16 x i1>)
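
; Masked store: the shadow of %v must be stored to shadow(%p) with the same
; mask and alignment, so lanes the store skips keep their previous shadow.
; With -msan-check-access-address=1 (ADDR) the pointer and the mask are also
; checked for poison before the store; with -msan-track-origins=1 (ORIGINS)
; the origin of every 4-byte granule covered by the 32-byte store is updated.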
define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @Store(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]])
; CHECK-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @Store(
; ADDR-NEXT: entry:
; ADDR-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; ADDR-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ADDR-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]])
; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT: [[TMP6:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]]
; ADDR: 7:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7:[0-9]+]]
; ADDR-NEXT: unreachable
; ADDR: 8:
; ADDR-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @Store(
; ORIGINS-NEXT: entry:
; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -4
; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGINS-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP7]], align 4
; ORIGINS-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 1
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP8]], align 4
; ORIGINS-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP7]], i32 2
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP9]], align 4
; ORIGINS-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[TMP7]], i32 3
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP10]], align 4
; ORIGINS-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP7]], i32 4
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP11]], align 4
; ORIGINS-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP7]], i32 5
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP12]], align 4
; ORIGINS-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP7]], i32 6
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP13]], align 4
; ORIGINS-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP7]], i32 7
; ORIGINS-NEXT: store i32 [[TMP1]], ptr [[TMP14]], align 4
; ORIGINS-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
; ORIGINS-NEXT: ret void
;
entry:
  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask)
  ret void
}
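
; Masked load: the shadow is masked-loaded from shadow(%p), with the shadow
; of the passthrough %v filling the disabled lanes. ADDR additionally checks
; %p and %mask; ORIGINS selects the result origin between the passthrough's
; origin and the one read from the origin mapping.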
define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @Load(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
; CHECK-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
; CHECK-NEXT: store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[X]]
;
; ADDR-LABEL: @Load(
; ADDR-NEXT: entry:
; ADDR-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT: [[TMP1:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; ADDR-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ADDR-NEXT: [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP2]])
; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP0]], 0
; ADDR-NEXT: [[TMP6:%.*]] = bitcast <4 x i1> [[TMP1]] to i4
; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
; ADDR: 7:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
; ADDR: 8:
; ADDR-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
; ADDR-NEXT: store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8
; ADDR-NEXT: ret <4 x double> [[X]]
;
; ORIGINS-LABEL: @Load(
; ORIGINS-NEXT: entry:
; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -4
; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGINS-NEXT: [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
; ORIGINS-NEXT: [[TMP8:%.*]] = sub <4 x i1> zeroinitializer, [[MASK]]
; ORIGINS-NEXT: [[TMP9:%.*]] = sext <4 x i1> [[TMP8]] to <4 x i64>
; ORIGINS-NEXT: [[TMP10:%.*]] = and <4 x i64> [[TMP0]], [[TMP9]]
; ORIGINS-NEXT: [[TMP11:%.*]] = bitcast <4 x i64> [[TMP10]] to i256
; ORIGINS-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP11]], 0
; ORIGINS-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP7]], align 4
; ORIGINS-NEXT: [[TMP13:%.*]] = select i1 [[_MSCMP]], i32 [[TMP1]], i32 [[TMP12]]
; ORIGINS-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
; ORIGINS-NEXT: store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8
; ORIGINS-NEXT: store i32 [[TMP13]], ptr @__msan_retval_origin_tls, align 4
; ORIGINS-NEXT: ret <4 x double> [[X]]
;
entry:
  %x = call <4 x double> @llvm.masked.load.v4f64.p0(ptr %p, i32 1, <4 x i1> %mask, <4 x double> %v)
  ret <4 x double> %x
}
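
; Without the sanitize_memory attribute the inputs are treated as fully
; initialized: a zero shadow is stored to shadow(%p) (and zero origins under
; ORIGINS), and no address or mask checks are emitted.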
define void @StoreNoSanitize(ptr %p, <4 x i64> %v, <4 x i1> %mask) {
; CHECK-LABEL: @StoreNoSanitize(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
; CHECK-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @StoreNoSanitize(
; ADDR-NEXT: entry:
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
; ADDR-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; ADDR-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
; ADDR-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @StoreNoSanitize(
; ORIGINS-NEXT: entry:
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
; ORIGINS-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; ORIGINS-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416
; ORIGINS-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: store i32 0, ptr [[TMP5]], align 4
; ORIGINS-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 1
; ORIGINS-NEXT: store i32 0, ptr [[TMP6]], align 4
; ORIGINS-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP5]], i32 2
; ORIGINS-NEXT: store i32 0, ptr [[TMP7]], align 4
; ORIGINS-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP5]], i32 3
; ORIGINS-NEXT: store i32 0, ptr [[TMP8]], align 4
; ORIGINS-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP5]], i32 4
; ORIGINS-NEXT: store i32 0, ptr [[TMP9]], align 4
; ORIGINS-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[TMP5]], i32 5
; ORIGINS-NEXT: store i32 0, ptr [[TMP10]], align 4
; ORIGINS-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP5]], i32 6
; ORIGINS-NEXT: store i32 0, ptr [[TMP11]], align 4
; ORIGINS-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i32 7
; ORIGINS-NEXT: store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT: tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
; ORIGINS-NEXT: ret void
;
entry:
  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask)
  ret void
}
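
; Likewise, a non-sanitized masked load is never checked and yields a clean
; result: zero shadow (and zero origin) is stored for the return value.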
define <4 x double> @LoadNoSanitize(ptr %p, <4 x double> %v, <4 x i1> %mask) {
; CHECK-LABEL: @LoadNoSanitize(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[X]]
;
; ADDR-LABEL: @LoadNoSanitize(
; ADDR-NEXT: entry:
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
; ADDR-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; ADDR-NEXT: ret <4 x double> [[X]]
;
; ORIGINS-LABEL: @LoadNoSanitize(
; ORIGINS-NEXT: entry:
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
; ORIGINS-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGINS-NEXT: ret <4 x double> [[X]]
;
entry:
  %x = call <4 x double> @llvm.masked.load.v4f64.p0(ptr %p, i32 1, <4 x i1> %mask, <4 x double> %v)
  ret <4 x double> %x
}

; FIXME: Provide real implementation.
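; Masked gather: for now only the shadow is gathered, from the lane-wise
; shadow addresses under the same mask, with the passthrough's shadow in the
; disabled lanes. ADDR checks the mask and the enabled lanes' pointer shadows
; (select mask, ptr-shadow, 0); ORIGINS stores origin 0 for the result.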
define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
; CHECK-LABEL: @Gather(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i64> [[TMP2]], splat (i64 87960930222080)
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr <16 x i64> [[TMP3]] to <16 x ptr>
; CHECK-NEXT: [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP4]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; CHECK-NEXT: store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @Gather(
; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8
; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> [[TMP2]], <16 x i64> zeroinitializer
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
; ADDR-NEXT: [[TMP5:%.*]] = xor <16 x i64> [[TMP4]], splat (i64 87960930222080)
; ADDR-NEXT: [[TMP6:%.*]] = inttoptr <16 x i64> [[TMP5]] to <16 x ptr>
; ADDR-NEXT: [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP6]], i32 4, <16 x i1> [[MASK]], <16 x i32> [[TMP3]])
; ADDR-NEXT: [[TMP7:%.*]] = bitcast <16 x i1> [[TMP1]] to i16
; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i16 [[TMP7]], 0
; ADDR-NEXT: [[TMP8:%.*]] = bitcast <16 x i64> [[_MSMASKEDPTRS]] to i1024
; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i1024 [[TMP8]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; ADDR: 9:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
; ADDR: 10:
; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
;
; ORIGINS-LABEL: @Gather(
; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 136) to ptr), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
; ORIGINS-NEXT: [[TMP4:%.*]] = xor <16 x i64> [[TMP3]], splat (i64 87960930222080)
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr <16 x i64> [[TMP4]] to <16 x ptr>
; ORIGINS-NEXT: [[TMP6:%.*]] = add <16 x i64> [[TMP4]], splat (i64 17592186044416)
; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr <16 x i64> [[TMP6]] to <16 x ptr>
; ORIGINS-NEXT: [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP5]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ORIGINS-NEXT: store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8
; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGINS-NEXT: ret <16 x float> [[RET]]
;
  %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru)
  ret <16 x float> %ret
}
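
; A non-sanitized gather produces a clean return shadow and origin; ADDR
; still forms the (all-zero) pointer-shadow select but emits no check.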
define <16 x float> @GatherNoSanitize(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) {
; CHECK-LABEL: @GatherNoSanitize(
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @GatherNoSanitize(
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> zeroinitializer, <16 x i64> zeroinitializer
; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
;
; ORIGINS-LABEL: @GatherNoSanitize(
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
; ORIGINS-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGINS-NEXT: ret <16 x float> [[RET]]
;
  %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru)
  ret <16 x float> %ret
}

; FIXME: Provide real implementation.
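; Masked scatter: the value's shadow is scattered to the lane-wise shadow
; addresses under the same mask. ADDR checks the mask and the enabled lanes'
; pointer shadows; ORIGINS computes per-lane origin addresses but does not
; yet store origins for the scattered lanes.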
define void @Scatter(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @Scatter(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[TMP2]], splat (i64 87960930222080)
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr <8 x i64> [[TMP3]] to <8 x ptr>
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP4]], i32 8, <8 x i1> [[MASK:%.*]])
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @Scatter(
; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; ADDR-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> [[TMP2]], <8 x i64> zeroinitializer
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
; ADDR-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[TMP4]], splat (i64 87960930222080)
; ADDR-NEXT: [[TMP6:%.*]] = inttoptr <8 x i64> [[TMP5]] to <8 x ptr>
; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP3]], <8 x ptr> [[TMP6]], i32 8, <8 x i1> [[MASK]])
; ADDR-NEXT: [[TMP7:%.*]] = bitcast <8 x i1> [[TMP1]] to i8
; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i8 [[TMP7]], 0
; ADDR-NEXT: [[TMP8:%.*]] = bitcast <8 x i64> [[_MSMASKEDPTRS]] to i512
; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP8]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
; ADDR: 9:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
; ADDR: 10:
; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @Scatter(
; ORIGINS-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
; ORIGINS-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[TMP3]], splat (i64 87960930222080)
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x ptr>
; ORIGINS-NEXT: [[TMP6:%.*]] = add <8 x i64> [[TMP4]], splat (i64 17592186044416)
; ORIGINS-NEXT: [[TMP7:%.*]] = inttoptr <8 x i64> [[TMP6]] to <8 x ptr>
; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP5]], i32 8, <8 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
; ORIGINS-NEXT: ret void
;
  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}
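
; A non-sanitized scatter only writes a zero shadow to the shadow addresses
; and emits no checks.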
define void @ScatterNoSanitize(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) {
; CHECK-LABEL: @ScatterNoSanitize(
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 87960930222080)
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr>
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]])
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @ScatterNoSanitize(
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> zeroinitializer, <8 x i64> zeroinitializer
; ADDR-NEXT: [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
; ADDR-NEXT: [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 87960930222080)
; ADDR-NEXT: [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr>
; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK]])
; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @ScatterNoSanitize(
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
; ORIGINS-NEXT: [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 87960930222080)
; ORIGINS-NEXT: [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr>
; ORIGINS-NEXT: [[TMP4:%.*]] = add <8 x i64> [[TMP2]], splat (i64 17592186044416)
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x ptr>
; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
; ORIGINS-NEXT: ret void
;
  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

; FIXME: Provide real implementation.
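; Expanding load: the shadow is expand-loaded from shadow(%ptr) under the
; same mask, with the passthrough's shadow in the disabled lanes. ADDR checks
; %ptr and the mask; ORIGINS computes the origin address but stores origin 0
; for the result.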
define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
; CHECK-LABEL: @ExpandLoad(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP4]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; CHECK-NEXT: store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @ExpandLoad(
; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ADDR-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT: [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP6]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP3]])
; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT: [[TMP7:%.*]] = bitcast <16 x i1> [[TMP2]] to i16
; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i16 [[TMP7]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; ADDR: 8:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
; ADDR: 9:
; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
;
; ORIGINS-LABEL: @ExpandLoad(
; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
; ORIGINS-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -4
; ORIGINS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; ORIGINS-NEXT: [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP5]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ORIGINS-NEXT: store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8
; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGINS-NEXT: ret <16 x float> [[RET]]
;
  %ret = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru)
  ret <16 x float> %ret
}
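
; A non-sanitized expanding load performs no shadow load and no checks and
; returns a clean shadow and origin.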
define <16 x float> @ExpandLoadNoSanitize(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) {
; CHECK-LABEL: @ExpandLoadNoSanitize(
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @ExpandLoadNoSanitize(
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
;
; ORIGINS-LABEL: @ExpandLoadNoSanitize(
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
; ORIGINS-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; ORIGINS-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGINS-NEXT: ret <16 x float> [[RET]]
;
  %ret = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru)
  ret <16 x float> %ret
}

; FIXME: Provide real implementation.
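; Compressing store: the value's shadow is compress-stored to shadow(%ptr)
; under the same mask. ADDR checks %ptr and the mask; ORIGINS computes the
; origin address but does not yet update origins for the stored bytes.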
define void @CompressStore(<16 x float> %value, ptr %ptr, <16 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @CompressStore(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], ptr [[TMP4]], <16 x i1> [[MASK:%.*]])
; CHECK-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @CompressStore(
; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ADDR-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP3]], ptr [[TMP6]], <16 x i1> [[MASK:%.*]])
; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT: [[TMP7:%.*]] = bitcast <16 x i1> [[TMP2]] to i16
; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i16 [[TMP7]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
; ADDR: 8:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
; ADDR: 9:
; ADDR-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @CompressStore(
; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
; ORIGINS-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -4
; ORIGINS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], ptr [[TMP5]], <16 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
; ORIGINS-NEXT: ret void
;
  call void @llvm.masked.compressstore.v16f32(<16 x float> %value, ptr %ptr, <16 x i1> %mask)
  ret void
}
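
; A non-sanitized compressing store writes a zero shadow and emits no checks.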
define void @CompressStoreNoSanitize(<16 x float> %value, ptr %ptr, <16 x i1> %mask) {
; CHECK-LABEL: @CompressStoreNoSanitize(
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]])
; CHECK-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @CompressStoreNoSanitize(
; ADDR-NEXT: call void @llvm.donothing()
; ADDR-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ADDR-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ADDR-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ADDR-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]])
; ADDR-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @CompressStoreNoSanitize(
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
; ORIGINS-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; ORIGINS-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
; ORIGINS-NEXT: ret void
;
  call void @llvm.masked.compressstore.v16f32(<16 x float> %value, ptr %ptr, <16 x i1> %mask)
  ret void
}