; Test 128-bit addition in which the second operand is a zero-extended i32.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check register additions.  The XOR ensures that we don't instead zero-extend
; %b into a register and use memory addition.
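; ALGFR adds a 32-bit register operand, zero-extended to 64 bits, to a 64-bit
; register, which is what the CHECK line below expects for the low 64 bits of
; the i128 addition.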
define void @f1(i128 *%aptr, i32 %b) {
; CHECK-LABEL: f1:
; CHECK: algfr {{%r[0-5]}}, %r3
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Like f1, but using an "in-register" extension.
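; ALGFR only reads the low 32 bits of its second operand, so the trunc/zext
; pair can be folded away and %r3 used directly.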
define void @f2(i128 *%aptr, i64 %b) {
; CHECK-LABEL: f2:
; CHECK: algfr {{%r[0-5]}}, %r3
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %trunc = trunc i64 %b to i32
  %bext = zext i32 %trunc to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Test register addition in cases where the second operand is zero extended
; from i64 rather than i32, but is later masked to i32 range.
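; Masking with 0xffffffff makes %bext equivalent to a zero extension from i32,
; so ALGFR is still usable.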
define void @f3(i128 *%aptr, i64 %b) {
; CHECK-LABEL: f3:
; CHECK: algfr {{%r[0-5]}}, %r3
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %bext = zext i64 %b to i128
  %and = and i128 %bext, 4294967295
  %add = add i128 %xor, %and
  store i128 %add, i128 *%aptr
  ret void
}

; Test ALGF with no offset.
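; ALGF is the memory form of ALGFR: it adds a zero-extended 32-bit memory word
; to a 64-bit register.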
define void @f4(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f4:
; CHECK: algf {{%r[0-5]}}, 0(%r3)
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %b = load i32, i32 *%bsrc
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Check the high end of the ALGF range.
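; ALGF has a signed 20-bit displacement (-524288 to 524287), so 524284
; (= 131071 * 4) is the largest word-aligned offset that still fits.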
define void @f5(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f5:
; CHECK: algf {{%r[0-5]}}, 524284(%r3)
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %ptr = getelementptr i32, i32 *%bsrc, i64 131071
  %b = load i32, i32 *%ptr
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Check the next word up, which must use separate address logic.
; Other sequences besides this one would be OK.
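; 131072 * 4 = 524288 no longer fits in the signed 20-bit displacement field,
; so the base register must be adjusted first (here with AGFI).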
define void @f6(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f6:
; CHECK: agfi %r3, 524288
; CHECK: algf {{%r[0-5]}}, 0(%r3)
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %ptr = getelementptr i32, i32 *%bsrc, i64 131072
  %b = load i32, i32 *%ptr
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Check the high end of the negative aligned ALGF range.
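; Element -1 is at byte offset -4, the word immediately below the base pointer.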
define void @f7(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f7:
; CHECK: algf {{%r[0-5]}}, -4(%r3)
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %ptr = getelementptr i32, i32 *%bsrc, i128 -1
  %b = load i32, i32 *%ptr
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Check the low end of the ALGF range.
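; -131072 * 4 = -524288 is the most negative displacement that still fits.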
define void @f8(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f8:
; CHECK: algf {{%r[0-5]}}, -524288(%r3)
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %ptr = getelementptr i32, i32 *%bsrc, i128 -131072
  %b = load i32, i32 *%ptr
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
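; -131073 * 4 = -524292 is just below the displacement range, so the base
; register is adjusted with AGFI first.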
define void @f9(i128 *%aptr, i32 *%bsrc) {
; CHECK-LABEL: f9:
; CHECK: agfi %r3, -524292
; CHECK: algf {{%r[0-5]}}, 0(%r3)
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %ptr = getelementptr i32, i32 *%bsrc, i128 -131073
  %b = load i32, i32 *%ptr
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}

; Check that ALGF allows an index.
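; The address here is formed from a base register, an index register and a
; displacement, all of which ALGF can encode directly.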
define void @f10(i128 *%aptr, i64 %src, i64 %index) {
; CHECK-LABEL: f10:
; CHECK: algf {{%r[0-5]}}, 524284({{%r4,%r3|%r3,%r4}})
  %a = load i128, i128 *%aptr
  %xor = xor i128 %a, 127
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524284
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32, i32 *%ptr
  %bext = zext i32 %b to i128
  %add = add i128 %xor, %bext
  store i128 %add, i128 *%aptr
  ret void
}