; RUN: llc < %s -verify-machineinstrs

; This test is disabled until PPCISelLowering learns to insert proper 64-bit
; code for ATOMIC_CMP_SWAP. Currently, it is inserting 32-bit instructions with
; 64-bit operands which causes the machine code verifier to throw a tantrum.

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-unknown-linux-gnu"
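
; Globals of each integer width exercised by the atomic operations below.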
@sc = common global i8 0
@uc = common global i8 0
@ss = common global i16 0
@us = common global i16 0
@si = common global i32 0
@ui = common global i32 0
@sl = common global i64 0, align 8
@ul = common global i64 0, align 8
@sll = common global i64 0, align 8
@ull = common global i64 0, align 8
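
; test_op_ignore: applies each atomicrmw operation (add, sub, or, xor, and,
; nand) with an operand of 1 and monotonic ordering to every global, and
; discards the result.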
define void @test_op_ignore() nounwind {
entry:
  %0 = atomicrmw add i8* @sc, i8 1 monotonic
  %1 = atomicrmw add i8* @uc, i8 1 monotonic
  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %3 = atomicrmw add i16* %2, i16 1 monotonic
  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %5 = atomicrmw add i16* %4, i16 1 monotonic
  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %7 = atomicrmw add i32* %6, i32 1 monotonic
  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %9 = atomicrmw add i32* %8, i32 1 monotonic
  %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %11 = atomicrmw add i64* %10, i64 1 monotonic
  %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %13 = atomicrmw add i64* %12, i64 1 monotonic
  %14 = atomicrmw sub i8* @sc, i8 1 monotonic
  %15 = atomicrmw sub i8* @uc, i8 1 monotonic
  %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %17 = atomicrmw sub i16* %16, i16 1 monotonic
  %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %19 = atomicrmw sub i16* %18, i16 1 monotonic
  %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %21 = atomicrmw sub i32* %20, i32 1 monotonic
  %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %23 = atomicrmw sub i32* %22, i32 1 monotonic
  %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %25 = atomicrmw sub i64* %24, i64 1 monotonic
  %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %27 = atomicrmw sub i64* %26, i64 1 monotonic
  %28 = atomicrmw or i8* @sc, i8 1 monotonic
  %29 = atomicrmw or i8* @uc, i8 1 monotonic
  %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %31 = atomicrmw or i16* %30, i16 1 monotonic
  %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %33 = atomicrmw or i16* %32, i16 1 monotonic
  %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %35 = atomicrmw or i32* %34, i32 1 monotonic
  %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %37 = atomicrmw or i32* %36, i32 1 monotonic
  %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %39 = atomicrmw or i64* %38, i64 1 monotonic
  %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %41 = atomicrmw or i64* %40, i64 1 monotonic
  %42 = atomicrmw xor i8* @sc, i8 1 monotonic
  %43 = atomicrmw xor i8* @uc, i8 1 monotonic
  %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %45 = atomicrmw xor i16* %44, i16 1 monotonic
  %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %47 = atomicrmw xor i16* %46, i16 1 monotonic
  %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %49 = atomicrmw xor i32* %48, i32 1 monotonic
  %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %51 = atomicrmw xor i32* %50, i32 1 monotonic
  %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %53 = atomicrmw xor i64* %52, i64 1 monotonic
  %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %55 = atomicrmw xor i64* %54, i64 1 monotonic
  %56 = atomicrmw and i8* @sc, i8 1 monotonic
  %57 = atomicrmw and i8* @uc, i8 1 monotonic
  %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %59 = atomicrmw and i16* %58, i16 1 monotonic
  %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %61 = atomicrmw and i16* %60, i16 1 monotonic
  %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %63 = atomicrmw and i32* %62, i32 1 monotonic
  %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %65 = atomicrmw and i32* %64, i32 1 monotonic
  %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %67 = atomicrmw and i64* %66, i64 1 monotonic
  %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %69 = atomicrmw and i64* %68, i64 1 monotonic
  %70 = atomicrmw nand i8* @sc, i8 1 monotonic
  %71 = atomicrmw nand i8* @uc, i8 1 monotonic
  %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %73 = atomicrmw nand i16* %72, i16 1 monotonic
  %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %75 = atomicrmw nand i16* %74, i16 1 monotonic
  %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %77 = atomicrmw nand i32* %76, i32 1 monotonic
  %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %79 = atomicrmw nand i32* %78, i32 1 monotonic
  %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %81 = atomicrmw nand i64* %80, i64 1 monotonic
  %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %83 = atomicrmw nand i64* %82, i64 1 monotonic
  br label %return

return:                                           ; preds = %entry
  ret void
}
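
; test_fetch_and_op: the same atomicrmw operations with an operand of 11,
; but here the fetched (old) value returned by each operation is stored
; back to the corresponding global.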
define void @test_fetch_and_op() nounwind {
entry:
  %0 = atomicrmw add i8* @sc, i8 11 monotonic
  store i8 %0, i8* @sc, align 1
  %1 = atomicrmw add i8* @uc, i8 11 monotonic
  store i8 %1, i8* @uc, align 1
  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %3 = atomicrmw add i16* %2, i16 11 monotonic
  store i16 %3, i16* @ss, align 2
  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %5 = atomicrmw add i16* %4, i16 11 monotonic
  store i16 %5, i16* @us, align 2
  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %7 = atomicrmw add i32* %6, i32 11 monotonic
  store i32 %7, i32* @si, align 4
  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %9 = atomicrmw add i32* %8, i32 11 monotonic
  store i32 %9, i32* @ui, align 4
  %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %11 = atomicrmw add i64* %10, i64 11 monotonic
  store i64 %11, i64* @sl, align 8
  %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %13 = atomicrmw add i64* %12, i64 11 monotonic
  store i64 %13, i64* @ul, align 8
  %14 = atomicrmw sub i8* @sc, i8 11 monotonic
  store i8 %14, i8* @sc, align 1
  %15 = atomicrmw sub i8* @uc, i8 11 monotonic
  store i8 %15, i8* @uc, align 1
  %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %17 = atomicrmw sub i16* %16, i16 11 monotonic
  store i16 %17, i16* @ss, align 2
  %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %19 = atomicrmw sub i16* %18, i16 11 monotonic
  store i16 %19, i16* @us, align 2
  %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %21 = atomicrmw sub i32* %20, i32 11 monotonic
  store i32 %21, i32* @si, align 4
  %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %23 = atomicrmw sub i32* %22, i32 11 monotonic
  store i32 %23, i32* @ui, align 4
  %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %25 = atomicrmw sub i64* %24, i64 11 monotonic
  store i64 %25, i64* @sl, align 8
  %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %27 = atomicrmw sub i64* %26, i64 11 monotonic
  store i64 %27, i64* @ul, align 8
  %28 = atomicrmw or i8* @sc, i8 11 monotonic
  store i8 %28, i8* @sc, align 1
  %29 = atomicrmw or i8* @uc, i8 11 monotonic
  store i8 %29, i8* @uc, align 1
  %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %31 = atomicrmw or i16* %30, i16 11 monotonic
  store i16 %31, i16* @ss, align 2
  %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %33 = atomicrmw or i16* %32, i16 11 monotonic
  store i16 %33, i16* @us, align 2
  %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %35 = atomicrmw or i32* %34, i32 11 monotonic
  store i32 %35, i32* @si, align 4
  %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %37 = atomicrmw or i32* %36, i32 11 monotonic
  store i32 %37, i32* @ui, align 4
  %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %39 = atomicrmw or i64* %38, i64 11 monotonic
  store i64 %39, i64* @sl, align 8
  %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %41 = atomicrmw or i64* %40, i64 11 monotonic
  store i64 %41, i64* @ul, align 8
  %42 = atomicrmw xor i8* @sc, i8 11 monotonic
  store i8 %42, i8* @sc, align 1
  %43 = atomicrmw xor i8* @uc, i8 11 monotonic
  store i8 %43, i8* @uc, align 1
  %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %45 = atomicrmw xor i16* %44, i16 11 monotonic
  store i16 %45, i16* @ss, align 2
  %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %47 = atomicrmw xor i16* %46, i16 11 monotonic
  store i16 %47, i16* @us, align 2
  %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %49 = atomicrmw xor i32* %48, i32 11 monotonic
  store i32 %49, i32* @si, align 4
  %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %51 = atomicrmw xor i32* %50, i32 11 monotonic
  store i32 %51, i32* @ui, align 4
  %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %53 = atomicrmw xor i64* %52, i64 11 monotonic
  store i64 %53, i64* @sl, align 8
  %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %55 = atomicrmw xor i64* %54, i64 11 monotonic
  store i64 %55, i64* @ul, align 8
  %56 = atomicrmw and i8* @sc, i8 11 monotonic
  store i8 %56, i8* @sc, align 1
  %57 = atomicrmw and i8* @uc, i8 11 monotonic
  store i8 %57, i8* @uc, align 1
  %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %59 = atomicrmw and i16* %58, i16 11 monotonic
  store i16 %59, i16* @ss, align 2
  %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %61 = atomicrmw and i16* %60, i16 11 monotonic
  store i16 %61, i16* @us, align 2
  %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %63 = atomicrmw and i32* %62, i32 11 monotonic
  store i32 %63, i32* @si, align 4
  %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %65 = atomicrmw and i32* %64, i32 11 monotonic
  store i32 %65, i32* @ui, align 4
  %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %67 = atomicrmw and i64* %66, i64 11 monotonic
  store i64 %67, i64* @sl, align 8
  %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %69 = atomicrmw and i64* %68, i64 11 monotonic
  store i64 %69, i64* @ul, align 8
  %70 = atomicrmw nand i8* @sc, i8 11 monotonic
  store i8 %70, i8* @sc, align 1
  %71 = atomicrmw nand i8* @uc, i8 11 monotonic
  store i8 %71, i8* @uc, align 1
  %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %73 = atomicrmw nand i16* %72, i16 11 monotonic
  store i16 %73, i16* @ss, align 2
  %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %75 = atomicrmw nand i16* %74, i16 11 monotonic
  store i16 %75, i16* @us, align 2
  %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %77 = atomicrmw nand i32* %76, i32 11 monotonic
  store i32 %77, i32* @si, align 4
  %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %79 = atomicrmw nand i32* %78, i32 11 monotonic
  store i32 %79, i32* @ui, align 4
  %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %81 = atomicrmw nand i64* %80, i64 11 monotonic
  store i64 %81, i64* @sl, align 8
  %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %83 = atomicrmw nand i64* %82, i64 11 monotonic
  store i64 %83, i64* @ul, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}
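
; test_op_and_fetch: the operand is loaded from @uc (widened as needed), the
; atomicrmw result is combined with it to recompute the new value, and that
; value is stored back, matching op-and-fetch semantics.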
define void @test_op_and_fetch() nounwind {
entry:
  %0 = load i8, i8* @uc, align 1
  %1 = atomicrmw add i8* @sc, i8 %0 monotonic
  %2 = add i8 %1, %0
  store i8 %2, i8* @sc, align 1
  %3 = load i8, i8* @uc, align 1
  %4 = atomicrmw add i8* @uc, i8 %3 monotonic
  %5 = add i8 %4, %3
  store i8 %5, i8* @uc, align 1
  %6 = load i8, i8* @uc, align 1
  %7 = zext i8 %6 to i16
  %8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %9 = atomicrmw add i16* %8, i16 %7 monotonic
  %10 = add i16 %9, %7
  store i16 %10, i16* @ss, align 2
  %11 = load i8, i8* @uc, align 1
  %12 = zext i8 %11 to i16
  %13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %14 = atomicrmw add i16* %13, i16 %12 monotonic
  %15 = add i16 %14, %12
  store i16 %15, i16* @us, align 2
  %16 = load i8, i8* @uc, align 1
  %17 = zext i8 %16 to i32
  %18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %19 = atomicrmw add i32* %18, i32 %17 monotonic
  %20 = add i32 %19, %17
  store i32 %20, i32* @si, align 4
  %21 = load i8, i8* @uc, align 1
  %22 = zext i8 %21 to i32
  %23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %24 = atomicrmw add i32* %23, i32 %22 monotonic
  %25 = add i32 %24, %22
  store i32 %25, i32* @ui, align 4
  %26 = load i8, i8* @uc, align 1
  %27 = zext i8 %26 to i64
  %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %29 = atomicrmw add i64* %28, i64 %27 monotonic
  %30 = add i64 %29, %27
  store i64 %30, i64* @sl, align 8
  %31 = load i8, i8* @uc, align 1
  %32 = zext i8 %31 to i64
  %33 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %34 = atomicrmw add i64* %33, i64 %32 monotonic
  %35 = add i64 %34, %32
  store i64 %35, i64* @ul, align 8
  %36 = load i8, i8* @uc, align 1
  %37 = atomicrmw sub i8* @sc, i8 %36 monotonic
  %38 = sub i8 %37, %36
  store i8 %38, i8* @sc, align 1
  %39 = load i8, i8* @uc, align 1
  %40 = atomicrmw sub i8* @uc, i8 %39 monotonic
  %41 = sub i8 %40, %39
  store i8 %41, i8* @uc, align 1
  %42 = load i8, i8* @uc, align 1
  %43 = zext i8 %42 to i16
  %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %45 = atomicrmw sub i16* %44, i16 %43 monotonic
  %46 = sub i16 %45, %43
  store i16 %46, i16* @ss, align 2
  %47 = load i8, i8* @uc, align 1
  %48 = zext i8 %47 to i16
  %49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %50 = atomicrmw sub i16* %49, i16 %48 monotonic
  %51 = sub i16 %50, %48
  store i16 %51, i16* @us, align 2
  %52 = load i8, i8* @uc, align 1
  %53 = zext i8 %52 to i32
  %54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %55 = atomicrmw sub i32* %54, i32 %53 monotonic
  %56 = sub i32 %55, %53
  store i32 %56, i32* @si, align 4
  %57 = load i8, i8* @uc, align 1
  %58 = zext i8 %57 to i32
  %59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %60 = atomicrmw sub i32* %59, i32 %58 monotonic
  %61 = sub i32 %60, %58
  store i32 %61, i32* @ui, align 4
  %62 = load i8, i8* @uc, align 1
  %63 = zext i8 %62 to i64
  %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %65 = atomicrmw sub i64* %64, i64 %63 monotonic
  %66 = sub i64 %65, %63
  store i64 %66, i64* @sl, align 8
  %67 = load i8, i8* @uc, align 1
  %68 = zext i8 %67 to i64
  %69 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %70 = atomicrmw sub i64* %69, i64 %68 monotonic
  %71 = sub i64 %70, %68
  store i64 %71, i64* @ul, align 8
  %72 = load i8, i8* @uc, align 1
  %73 = atomicrmw or i8* @sc, i8 %72 monotonic
  %74 = or i8 %73, %72
  store i8 %74, i8* @sc, align 1
  %75 = load i8, i8* @uc, align 1
  %76 = atomicrmw or i8* @uc, i8 %75 monotonic
  %77 = or i8 %76, %75
  store i8 %77, i8* @uc, align 1
  %78 = load i8, i8* @uc, align 1
  %79 = zext i8 %78 to i16
  %80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %81 = atomicrmw or i16* %80, i16 %79 monotonic
  %82 = or i16 %81, %79
  store i16 %82, i16* @ss, align 2
  %83 = load i8, i8* @uc, align 1
  %84 = zext i8 %83 to i16
  %85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %86 = atomicrmw or i16* %85, i16 %84 monotonic
  %87 = or i16 %86, %84
  store i16 %87, i16* @us, align 2
  %88 = load i8, i8* @uc, align 1
  %89 = zext i8 %88 to i32
  %90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %91 = atomicrmw or i32* %90, i32 %89 monotonic
  %92 = or i32 %91, %89
  store i32 %92, i32* @si, align 4
  %93 = load i8, i8* @uc, align 1
  %94 = zext i8 %93 to i32
  %95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %96 = atomicrmw or i32* %95, i32 %94 monotonic
  %97 = or i32 %96, %94
  store i32 %97, i32* @ui, align 4
  %98 = load i8, i8* @uc, align 1
  %99 = zext i8 %98 to i64
  %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %101 = atomicrmw or i64* %100, i64 %99 monotonic
  %102 = or i64 %101, %99
  store i64 %102, i64* @sl, align 8
  %103 = load i8, i8* @uc, align 1
  %104 = zext i8 %103 to i64
  %105 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %106 = atomicrmw or i64* %105, i64 %104 monotonic
  %107 = or i64 %106, %104
  store i64 %107, i64* @ul, align 8
  %108 = load i8, i8* @uc, align 1
  %109 = atomicrmw xor i8* @sc, i8 %108 monotonic
  %110 = xor i8 %109, %108
  store i8 %110, i8* @sc, align 1
  %111 = load i8, i8* @uc, align 1
  %112 = atomicrmw xor i8* @uc, i8 %111 monotonic
  %113 = xor i8 %112, %111
  store i8 %113, i8* @uc, align 1
  %114 = load i8, i8* @uc, align 1
  %115 = zext i8 %114 to i16
  %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %117 = atomicrmw xor i16* %116, i16 %115 monotonic
  %118 = xor i16 %117, %115
  store i16 %118, i16* @ss, align 2
  %119 = load i8, i8* @uc, align 1
  %120 = zext i8 %119 to i16
  %121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %122 = atomicrmw xor i16* %121, i16 %120 monotonic
  %123 = xor i16 %122, %120
  store i16 %123, i16* @us, align 2
  %124 = load i8, i8* @uc, align 1
  %125 = zext i8 %124 to i32
  %126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %127 = atomicrmw xor i32* %126, i32 %125 monotonic
  %128 = xor i32 %127, %125
  store i32 %128, i32* @si, align 4
  %129 = load i8, i8* @uc, align 1
  %130 = zext i8 %129 to i32
  %131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %132 = atomicrmw xor i32* %131, i32 %130 monotonic
  %133 = xor i32 %132, %130
  store i32 %133, i32* @ui, align 4
  %134 = load i8, i8* @uc, align 1
  %135 = zext i8 %134 to i64
  %136 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %137 = atomicrmw xor i64* %136, i64 %135 monotonic
  %138 = xor i64 %137, %135
  store i64 %138, i64* @sl, align 8
  %139 = load i8, i8* @uc, align 1
  %140 = zext i8 %139 to i64
  %141 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %142 = atomicrmw xor i64* %141, i64 %140 monotonic
  %143 = xor i64 %142, %140
  store i64 %143, i64* @ul, align 8
  %144 = load i8, i8* @uc, align 1
  %145 = atomicrmw and i8* @sc, i8 %144 monotonic
  %146 = and i8 %145, %144
  store i8 %146, i8* @sc, align 1
  %147 = load i8, i8* @uc, align 1
  %148 = atomicrmw and i8* @uc, i8 %147 monotonic
  %149 = and i8 %148, %147
  store i8 %149, i8* @uc, align 1
  %150 = load i8, i8* @uc, align 1
  %151 = zext i8 %150 to i16
  %152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %153 = atomicrmw and i16* %152, i16 %151 monotonic
  %154 = and i16 %153, %151
  store i16 %154, i16* @ss, align 2
  %155 = load i8, i8* @uc, align 1
  %156 = zext i8 %155 to i16
  %157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %158 = atomicrmw and i16* %157, i16 %156 monotonic
  %159 = and i16 %158, %156
  store i16 %159, i16* @us, align 2
  %160 = load i8, i8* @uc, align 1
  %161 = zext i8 %160 to i32
  %162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %163 = atomicrmw and i32* %162, i32 %161 monotonic
  %164 = and i32 %163, %161
  store i32 %164, i32* @si, align 4
  %165 = load i8, i8* @uc, align 1
  %166 = zext i8 %165 to i32
  %167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %168 = atomicrmw and i32* %167, i32 %166 monotonic
  %169 = and i32 %168, %166
  store i32 %169, i32* @ui, align 4
  %170 = load i8, i8* @uc, align 1
  %171 = zext i8 %170 to i64
  %172 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %173 = atomicrmw and i64* %172, i64 %171 monotonic
  %174 = and i64 %173, %171
  store i64 %174, i64* @sl, align 8
  %175 = load i8, i8* @uc, align 1
  %176 = zext i8 %175 to i64
  %177 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %178 = atomicrmw and i64* %177, i64 %176 monotonic
  %179 = and i64 %178, %176
  store i64 %179, i64* @ul, align 8
  %180 = load i8, i8* @uc, align 1
  %181 = atomicrmw nand i8* @sc, i8 %180 monotonic
  %182 = xor i8 %181, -1
  %183 = and i8 %182, %180
  store i8 %183, i8* @sc, align 1
  %184 = load i8, i8* @uc, align 1
  %185 = atomicrmw nand i8* @uc, i8 %184 monotonic
  %186 = xor i8 %185, -1
  %187 = and i8 %186, %184
  store i8 %187, i8* @uc, align 1
  %188 = load i8, i8* @uc, align 1
  %189 = zext i8 %188 to i16
  %190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %191 = atomicrmw nand i16* %190, i16 %189 monotonic
  %192 = xor i16 %191, -1
  %193 = and i16 %192, %189
  store i16 %193, i16* @ss, align 2
  %194 = load i8, i8* @uc, align 1
  %195 = zext i8 %194 to i16
  %196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %197 = atomicrmw nand i16* %196, i16 %195 monotonic
  %198 = xor i16 %197, -1
  %199 = and i16 %198, %195
  store i16 %199, i16* @us, align 2
  %200 = load i8, i8* @uc, align 1
  %201 = zext i8 %200 to i32
  %202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %203 = atomicrmw nand i32* %202, i32 %201 monotonic
  %204 = xor i32 %203, -1
  %205 = and i32 %204, %201
  store i32 %205, i32* @si, align 4
  %206 = load i8, i8* @uc, align 1
  %207 = zext i8 %206 to i32
  %208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %209 = atomicrmw nand i32* %208, i32 %207 monotonic
  %210 = xor i32 %209, -1
  %211 = and i32 %210, %207
  store i32 %211, i32* @ui, align 4
  %212 = load i8, i8* @uc, align 1
  %213 = zext i8 %212 to i64
  %214 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %215 = atomicrmw nand i64* %214, i64 %213 monotonic
  %216 = xor i64 %215, -1
  %217 = and i64 %216, %213
  store i64 %217, i64* @sl, align 8
  %218 = load i8, i8* @uc, align 1
  %219 = zext i8 %218 to i64
  %220 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %221 = atomicrmw nand i64* %220, i64 %219 monotonic
  %222 = xor i64 %221, -1
  %223 = and i64 %222, %219
  store i64 %223, i64* @ul, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}
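
; test_compare_and_swap: cmpxchg on each global; the first round stores the
; returned old value, the second round stores a flag indicating whether the
; old value matched the expected operand.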
define void @test_compare_and_swap() nounwind {
entry:
  %0 = load i8, i8* @uc, align 1
  %1 = load i8, i8* @sc, align 1
  %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic monotonic
  store i8 %2, i8* @sc, align 1
  %3 = load i8, i8* @uc, align 1
  %4 = load i8, i8* @sc, align 1
  %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic monotonic
  store i8 %5, i8* @uc, align 1
  %6 = load i8, i8* @uc, align 1
  %7 = zext i8 %6 to i16
  %8 = load i8, i8* @sc, align 1
  %9 = sext i8 %8 to i16
  %10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic monotonic
  store i16 %11, i16* @ss, align 2
  %12 = load i8, i8* @uc, align 1
  %13 = zext i8 %12 to i16
  %14 = load i8, i8* @sc, align 1
  %15 = sext i8 %14 to i16
  %16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic monotonic
  store i16 %17, i16* @us, align 2
  %18 = load i8, i8* @uc, align 1
  %19 = zext i8 %18 to i32
  %20 = load i8, i8* @sc, align 1
  %21 = sext i8 %20 to i32
  %22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic monotonic
  store i32 %23, i32* @si, align 4
  %24 = load i8, i8* @uc, align 1
  %25 = zext i8 %24 to i32
  %26 = load i8, i8* @sc, align 1
  %27 = sext i8 %26 to i32
  %28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic monotonic
  store i32 %29, i32* @ui, align 4
  %30 = load i8, i8* @uc, align 1
  %31 = zext i8 %30 to i64
  %32 = load i8, i8* @sc, align 1
  %33 = sext i8 %32 to i64
  %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic monotonic
  store i64 %35, i64* @sl, align 8
  %36 = load i8, i8* @uc, align 1
  %37 = zext i8 %36 to i64
  %38 = load i8, i8* @sc, align 1
  %39 = sext i8 %38 to i64
  %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic monotonic
  store i64 %41, i64* @ul, align 8
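  ; Second round: store whether each cmpxchg hit the expected value rather
  ; than the returned value itself.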
  %42 = load i8, i8* @uc, align 1
  %43 = load i8, i8* @sc, align 1
  %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic monotonic
  %45 = icmp eq i8 %44, %42
  %46 = zext i1 %45 to i8
  %47 = zext i8 %46 to i32
  store i32 %47, i32* @ui, align 4
  %48 = load i8, i8* @uc, align 1
  %49 = load i8, i8* @sc, align 1
  %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic monotonic
  %51 = icmp eq i8 %50, %48
  %52 = zext i1 %51 to i8
  %53 = zext i8 %52 to i32
  store i32 %53, i32* @ui, align 4
  %54 = load i8, i8* @uc, align 1
  %55 = zext i8 %54 to i16
  %56 = load i8, i8* @sc, align 1
  %57 = sext i8 %56 to i16
  %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic monotonic
  %60 = icmp eq i16 %59, %55
  %61 = zext i1 %60 to i8
  %62 = zext i8 %61 to i32
  store i32 %62, i32* @ui, align 4
  %63 = load i8, i8* @uc, align 1
  %64 = zext i8 %63 to i16
  %65 = load i8, i8* @sc, align 1
  %66 = sext i8 %65 to i16
  %67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic monotonic
  %69 = icmp eq i16 %68, %64
  %70 = zext i1 %69 to i8
  %71 = zext i8 %70 to i32
  store i32 %71, i32* @ui, align 4
  %72 = load i8, i8* @uc, align 1
  %73 = zext i8 %72 to i32
  %74 = load i8, i8* @sc, align 1
  %75 = sext i8 %74 to i32
  %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic monotonic
  %78 = icmp eq i32 %77, %73
  %79 = zext i1 %78 to i8
  %80 = zext i8 %79 to i32
  store i32 %80, i32* @ui, align 4
  %81 = load i8, i8* @uc, align 1
  %82 = zext i8 %81 to i32
  %83 = load i8, i8* @sc, align 1
  %84 = sext i8 %83 to i32
  %85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic monotonic
  %87 = icmp eq i32 %86, %82
  %88 = zext i1 %87 to i8
  %89 = zext i8 %88 to i32
  store i32 %89, i32* @ui, align 4
  %90 = load i8, i8* @uc, align 1
  %91 = zext i8 %90 to i64
  %92 = load i8, i8* @sc, align 1
  %93 = sext i8 %92 to i64
  %94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic monotonic
  %96 = icmp eq i64 %95, %91
  %97 = zext i1 %96 to i8
  %98 = zext i8 %97 to i32
  store i32 %98, i32* @ui, align 4
  %99 = load i8, i8* @uc, align 1
  %100 = zext i8 %99 to i64
  %101 = load i8, i8* @sc, align 1
  %102 = sext i8 %101 to i64
  %103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic monotonic
  %105 = icmp eq i64 %104, %100
  %106 = zext i1 %105 to i8
  %107 = zext i8 %106 to i32
  store i32 %107, i32* @ui, align 4
  br label %return

return:                                           ; preds = %entry
  ret void
}
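
; test_lock: atomicrmw xchg of 1 into each location (test-and-set), then
; each location is cleared with a volatile store of 0 (lock release).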
define void @test_lock() nounwind {
entry:
  %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
  store i8 %0, i8* @sc, align 1
  %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
  store i8 %1, i8* @uc, align 1
  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  %3 = atomicrmw xchg i16* %2, i16 1 monotonic
  store i16 %3, i16* @ss, align 2
  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  %5 = atomicrmw xchg i16* %4, i16 1 monotonic
  store i16 %5, i16* @us, align 2
  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  %7 = atomicrmw xchg i32* %6, i32 1 monotonic
  store i32 %7, i32* @si, align 4
  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  %9 = atomicrmw xchg i32* %8, i32 1 monotonic
  store i32 %9, i32* @ui, align 4
  %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  %11 = atomicrmw xchg i64* %10, i64 1 monotonic
  store i64 %11, i64* @sl, align 8
  %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  %13 = atomicrmw xchg i64* %12, i64 1 monotonic
  store i64 %13, i64* @ul, align 8
  fence seq_cst
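  ; Release: clear each location with a volatile store of 0.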
  store volatile i8 0, i8* @sc, align 1
  store volatile i8 0, i8* @uc, align 1
  %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
  store volatile i16 0, i16* %14, align 2
  %15 = bitcast i8* bitcast (i16* @us to i8*) to i16*
  store volatile i16 0, i16* %15, align 2
  %16 = bitcast i8* bitcast (i32* @si to i8*) to i32*
  store volatile i32 0, i32* %16, align 4
  %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
  store volatile i32 0, i32* %17, align 4
  %18 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
  store volatile i64 0, i64* %18, align 8
  %19 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
  store volatile i64 0, i64* %19, align 8
  %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
  store volatile i64 0, i64* %20, align 8
  %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
  store volatile i64 0, i64* %21, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}