; Test 64-bit byteswaps from memory to registers.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @llvm.bswap.i64(i64 %a)
declare i64 @foo()
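
; LRVG is the z/Architecture LOAD REVERSED (64-bit) instruction.  Its
; RXY-format address has a signed 20-bit displacement, so offsets from
; -524288 to 524287 bytes are directly encodable; the functions below
; probe both ends of that range.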

; Check LRVG with no displacement.
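; The pointer argument and the return value both use %r2 in the
; SystemZ ELF ABI, so the result overwrites the base register.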
define i64 @f1(i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %a = load i64, i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the aligned LRVG range.
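; 65535 * 8 = 524280, the largest doubleword-aligned offset that fits
; in LRVG's signed 20-bit displacement field.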
define i64 @f2(i64 *%src) {
; CHECK-LABEL: f2:
; CHECK: lrvg %r2, 524280(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
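; 65536 * 8 = 524288 is one past the displacement limit, so the base
; register must first be adjusted with AGFI (add immediate).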
define i64 @f3(i64 *%src) {
; CHECK-LABEL: f3:
; CHECK: agfi %r2, 524288
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the negative aligned LRVG range.
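; Index -1 gives a byte offset of -8, the negative doubleword-aligned
; displacement closest to zero.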
define i64 @f4(i64 *%src) {
; CHECK-LABEL: f4:
; CHECK: lrvg %r2, -8(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the low end of the LRVG range.
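; -65536 * 8 = -524288, the most negative value the signed 20-bit
; displacement field can hold.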
define i64 @f5(i64 *%src) {
; CHECK-LABEL: f5:
; CHECK: lrvg %r2, -524288(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
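; -65537 * 8 = -524296 is one doubleword below the limit, so AGFI
; adjusts the base first.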
define i64 @f6(i64 *%src) {
; CHECK-LABEL: f6:
; CHECK: agfi %r2, -524296
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check that LRVG allows an index.
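; The RXY-format address takes a base register, an index register and
; a displacement, so base + index + 524287 folds into a single address.
; Either register may serve as base or index, hence the alternation in
; the CHECK pattern.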
define i64 @f7(i64 %src, i64 %index) {
; CHECK-LABEL: f7:
; CHECK: lrvg %r2, 524287({{%r3,%r2|%r2,%r3}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Test a case where we spill the source of at least one LRVGR. We want
; to use LRVG if possible.
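; The ten calls to @foo keep ten i64 results live across call-clobbered
; registers, forcing at least one to spill; byteswapping it straight
; from its stack slot with LRVG avoids a reload followed by LRVGR.
; 160(%r15) is the first local slot above the 160-byte caller-allocated
; register save area in the SystemZ ELF ABI.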
define i64 @f8(i64 *%ptr) {
; CHECK-LABEL: f8:
; CHECK: lrvg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14

  %val0 = call i64 @foo()
  %val1 = call i64 @foo()
  %val2 = call i64 @foo()
  %val3 = call i64 @foo()
  %val4 = call i64 @foo()
  %val5 = call i64 @foo()
  %val6 = call i64 @foo()
  %val7 = call i64 @foo()
  %val8 = call i64 @foo()
  %val9 = call i64 @foo()

  %swapped0 = call i64 @llvm.bswap.i64(i64 %val0)
  %swapped1 = call i64 @llvm.bswap.i64(i64 %val1)
  %swapped2 = call i64 @llvm.bswap.i64(i64 %val2)
  %swapped3 = call i64 @llvm.bswap.i64(i64 %val3)
  %swapped4 = call i64 @llvm.bswap.i64(i64 %val4)
  %swapped5 = call i64 @llvm.bswap.i64(i64 %val5)
  %swapped6 = call i64 @llvm.bswap.i64(i64 %val6)
  %swapped7 = call i64 @llvm.bswap.i64(i64 %val7)
  %swapped8 = call i64 @llvm.bswap.i64(i64 %val8)
  %swapped9 = call i64 @llvm.bswap.i64(i64 %val9)

  %ret1 = add i64 %swapped0, %swapped1
  %ret2 = add i64 %ret1, %swapped2
  %ret3 = add i64 %ret2, %swapped3
  %ret4 = add i64 %ret3, %swapped4
  %ret5 = add i64 %ret4, %swapped5
  %ret6 = add i64 %ret5, %swapped6
  %ret7 = add i64 %ret6, %swapped7
  %ret8 = add i64 %ret7, %swapped8
  %ret9 = add i64 %ret8, %swapped9

  ret i64 %ret9
}