; Test 32-bit conditional stores that are presented as selects.

; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s

declare void @foo(i32 *)

; Test the simple case, with the loaded value first.
define void @f1(i32 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f1:
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; ...and with the loaded value second.
define void @f2(i32 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f2:
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %alt, i32 %orig
  store i32 %res, i32 *%ptr
  ret void
}
; Test cases where the value is explicitly sign-extended to 64 bits, with the
; loaded value first.
define void @f3(i32 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f3:
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %ext = sext i32 %orig to i64
  %res = select i1 %cond, i64 %ext, i64 %alt
  %trunc = trunc i64 %res to i32
  store i32 %trunc, i32 *%ptr
  ret void
}
; ...and with the loaded value second.
define void @f4(i32 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f4:
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %ext = sext i32 %orig to i64
  %res = select i1 %cond, i64 %alt, i64 %ext
  %trunc = trunc i64 %res to i32
  store i32 %trunc, i32 *%ptr
  ret void
}
; Test cases where the value is explicitly zero-extended to 64 bits, with the
; loaded value first.
define void @f5(i32 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f5:
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %ext = zext i32 %orig to i64
  %res = select i1 %cond, i64 %ext, i64 %alt
  %trunc = trunc i64 %res to i32
  store i32 %trunc, i32 *%ptr
  ret void
}
; ...and with the loaded value second.
define void @f6(i32 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f6:
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %ext = zext i32 %orig to i64
  %res = select i1 %cond, i64 %alt, i64 %ext
  %trunc = trunc i64 %res to i32
  store i32 %trunc, i32 *%ptr
  ret void
}
; Check the high end of the aligned ST range.
define void @f7(i32 *%base, i32 %alt, i32 %limit) {
; CHECK-LABEL: f7:
; CHECK: st %r3, 4092(%r2)
  %ptr = getelementptr i32, i32 *%base, i64 1023
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check the next word up, which should use STY instead of ST.
define void @f8(i32 *%base, i32 %alt, i32 %limit) {
; CHECK-LABEL: f8:
; CHECK: sty %r3, 4096(%r2)
  %ptr = getelementptr i32, i32 *%base, i64 1024
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check the high end of the aligned STY range.
define void @f9(i32 *%base, i32 %alt, i32 %limit) {
; CHECK-LABEL: f9:
; CHECK: sty %r3, 524284(%r2)
  %ptr = getelementptr i32, i32 *%base, i64 131071
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f10(i32 *%base, i32 %alt, i32 %limit) {
; CHECK-LABEL: f10:
; CHECK: agfi %r2, 524288
; CHECK: st %r3, 0(%r2)
  %ptr = getelementptr i32, i32 *%base, i64 131072
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check the low end of the STY range.
define void @f11(i32 *%base, i32 %alt, i32 %limit) {
; CHECK-LABEL: f11:
; CHECK: sty %r3, -524288(%r2)
  %ptr = getelementptr i32, i32 *%base, i64 -131072
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f12(i32 *%base, i32 %alt, i32 %limit) {
; CHECK-LABEL: f12:
; CHECK: agfi %r2, -524292
; CHECK: st %r3, 0(%r2)
  %ptr = getelementptr i32, i32 *%base, i64 -131073
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check that STY allows an index.
define void @f13(i64 %base, i64 %index, i32 %alt, i32 %limit) {
; CHECK-LABEL: f13:
; CHECK: sty %r4, 4096(%r3,%r2)
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i32 *
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; Check that volatile loads are not matched.
define void @f14(i32 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f14:
; CHECK: l {{%r[0-5]}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: st {{%r[0-5]}}, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load volatile i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; ...likewise stores. In this case we should have a conditional load into %r3.
define void @f15(i32 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f15:
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: l %r3, 0(%r2)
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store volatile i32 %res, i32 *%ptr
  ret void
}
; Check that atomic loads are not matched. The transformation is OK for
; the "unordered" case tested here, but since we don't try to handle atomic
; operations at all in this context, it seems better to assert that than
; to restrict the test to a stronger ordering.
define void @f16(i32 *%ptr, i32 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f16:
; CHECK: l {{%r[0-5]}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: st {{%r[0-5]}}, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load atomic i32, i32 *%ptr unordered, align 4
  %res = select i1 %cond, i32 %orig, i32 %alt
  store i32 %res, i32 *%ptr
  ret void
}
; ...likewise stores.
define void @f17(i32 *%ptr, i32 %alt, i32 %limit) {
; FIXME: should use a normal store instead of CS.
; CHECK-LABEL: f17:
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: l %r3, 0(%r2)
; CHECK: st %r3, 0(%r2)
  %cond = icmp ult i32 %limit, 420
  %orig = load i32, i32 *%ptr
  %res = select i1 %cond, i32 %orig, i32 %alt
  store atomic i32 %res, i32 *%ptr unordered, align 4
  ret void
}
; Try a frame index base.
291 define void @f18(i32 %alt, i32 %limit) {
293 ; CHECK: brasl %r14, foo@PLT
295 ; CHECK: jl [[LABEL:[^ ]*]]
297 ; CHECK: st {{%r[0-9]+}}, {{[0-9]+}}(%r15)
299 ; CHECK: brasl %r14, foo@PLT
302 call void @foo(i32 *%ptr)
303 %cond = icmp ult i32 %limit, 420
304 %orig = load i32, i32 *%ptr
305 %res = select i1 %cond, i32 %orig, i32 %alt
306 store i32 %res, i32 *%ptr
307 call void @foo(i32 *%ptr)