1 ; RUN: llc < %s -march=sparcv9 -mattr=+popc -disable-sparc-delay-filler -disable-sparc-leaf-proc | FileCheck %s
2 ; RUN: llc < %s -march=sparcv9 -mattr=+popc | FileCheck %s -check-prefix=OPT
10 define i64 @ret2(i64 %a, i64 %b) {
15 ; CHECK: sllx %i0, 7, %i0
19 ; OPT: sllx %o0, 7, %o0
20 define i64 @shl_imm(i64 %a) {
26 ; CHECK: srax %i0, %i1, %i0
30 ; OPT: srax %o0, %o1, %o0
31 define i64 @sra_reg(i64 %a, i64 %b) {
36 ; Immediate materialization. Many of these patterns could actually be merged
37 ; into the restore instruction:
39 ; restore %g0, %g0, %o0
47 define i64 @ret_imm0() {
52 ; CHECK: mov -4096, %i0
57 define i64 @ret_simm13() {
69 define i64 @ret_sethi() {
74 ; CHECK: sethi 4, [[R:%[goli][0-7]]]
75 ; CHECK: or [[R]], 1, %i0
78 ; OPT: sethi 4, [[R:%[go][0-7]]]
80 ; OPT: or [[R]], 1, %o0
82 define i64 @ret_sethi_or() {
87 ; CHECK: sethi 4, [[R:%[goli][0-7]]]
88 ; CHECK: xor [[R]], -4, %i0
91 ; OPT: sethi 4, [[R:%[go][0-7]]]
93 ; OPT: xor [[R]], -4, %o0
95 define i64 @ret_nimm33() {
102 define i64 @ret_bigimm() {
103 ret i64 6800754272627607872
107 ; CHECK: sethi 1048576
108 define i64 @ret_bigimm2() {
109 ret i64 4611686018427387904 ; 0x4000000000000000
113 ; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
114 ; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
115 ; CHECK: andn [[R1]], %i0, %i0
116 define i64 @reg_reg_alu(i64 %x, i64 %y, i64 %z) {
125 ; CHECK: add %i0, -5, [[R0:%[goli][0-7]]]
126 ; CHECK: xor [[R0]], 2, %i0
127 define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
142 define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
143 %a = load i64, i64* %p
145 store i64 %ai, i64* %p
146 %b = load i32, i32* %q
147 %b2 = zext i32 %b to i64
148 %bi = trunc i64 %ai to i32
149 store i32 %bi, i32* %q
150 %c = load i32, i32* %r
151 %c2 = sext i32 %c to i64
152 store i64 %ai, i64* %p
153 %d = load i16, i16* %s
154 %d2 = sext i16 %d to i64
155 %di = trunc i64 %ai to i16
156 store i16 %di, i16* %s
158 %x1 = add i64 %a, %b2
159 %x2 = add i64 %c2, %d2
160 %x3 = add i64 %x1, %x2
165 ; CHECK: ldub [%i0], %i0
166 define i64 @load_bool(i1* %p) {
168 %b = zext i1 %a to i64
173 ; CHECK: ldx [%i0+8], [[R:%[goli][0-7]]]
174 ; CHECK: stx [[R]], [%i0+16]
175 ; CHECK: st [[R]], [%i1+-8]
176 ; CHECK: sth [[R]], [%i2+40]
177 ; CHECK: stb [[R]], [%i3+-20]
178 define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
179 %p1 = getelementptr i64, i64* %p, i64 1
180 %p2 = getelementptr i64, i64* %p, i64 2
181 %pv = load i64, i64* %p1
182 store i64 %pv, i64* %p2
184 %q2 = getelementptr i32, i32* %q, i32 -2
185 %qv = trunc i64 %pv to i32
186 store i32 %qv, i32* %q2
188 %r2 = getelementptr i16, i16* %r, i16 20
189 %rv = trunc i64 %pv to i16
190 store i16 %rv, i16* %r2
192 %s2 = getelementptr i8, i8* %s, i8 -20
193 %sv = trunc i64 %pv to i8
194 store i8 %sv, i8* %s2
199 ; CHECK: promote_shifts
200 ; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
201 ; CHECK: sll [[R]], [[R]], %i0
202 define i8 @promote_shifts(i8* %p) {
203 %L24 = load i8, i8* %p
204 %L32 = load i8, i8* %p
205 %B36 = shl i8 %L24, %L32
210 ; CHECK: mulx %i0, %i1, %i0
211 define i64 @multiply(i64 %a, i64 %b) {
216 ; CHECK: signed_divide
217 ; CHECK: sdivx %i0, %i1, %i0
218 define i64 @signed_divide(i64 %a, i64 %b) {
223 ; CHECK: unsigned_divide
224 ; CHECK: udivx %i0, %i1, %i0
225 define i64 @unsigned_divide(i64 %a, i64 %b) {
230 define void @access_fi() {
232 %b = alloca [32 x i8], align 1
233 %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %b, i64 0, i64 0
234 call void @g(i8* %arraydecay) #2
240 ; CHECK: expand_setcc
242 ; CHECK: movl %xcc, 1,
243 define i32 @expand_setcc(i64 %a) {
244 %cond = icmp sle i64 %a, 0
245 %cast2 = zext i1 %cond to i32
246 %RV = sub i32 1, %cast2
253 define i64 @spill_i64(i64 %x) {
254 call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7}"()
258 ; CHECK: bitcast_i64_f64
261 define i64 @bitcast_i64_f64(double %x) {
262 %y = bitcast double %x to i64
266 ; CHECK: bitcast_f64_i64
269 define double @bitcast_f64_i64(i64 %x) {
270 %y = bitcast i64 %x to double
274 ; CHECK-LABEL: store_zero:
275 ; CHECK: stx %g0, [%i0]
276 ; CHECK: stx %g0, [%i1+8]
278 ; OPT-LABEL: store_zero:
279 ; OPT: stx %g0, [%o0]
280 ; OPT: stx %g0, [%o1+8]
281 define i64 @store_zero(i64* nocapture %a, i64* nocapture %b) {
283 store i64 0, i64* %a, align 8
284 %0 = getelementptr inbounds i64, i64* %b, i32 1
285 store i64 0, i64* %0, align 8
289 ; CHECK-LABEL: bit_ops
295 define i64 @bit_ops(i64 %arg) {
297 %0 = tail call i64 @llvm.ctpop.i64(i64 %arg)
298 %1 = tail call i64 @llvm.ctlz.i64(i64 %arg, i1 true)
299 %2 = tail call i64 @llvm.cttz.i64(i64 %arg, i1 true)
300 %3 = tail call i64 @llvm.bswap.i64(i64 %arg)
; Declarations of the bit-manipulation intrinsics exercised by @bit_ops:
; ctpop = population count, ctlz/cttz = count leading/trailing zeros
; (their i1 flag, when true, marks a zero input as undefined — here the
; callers pass true), bswap = byte-order reversal. All operate on i64.
307 declare i64 @llvm.ctpop.i64(i64) nounwind readnone
308 declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
309 declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone
310 declare i64 @llvm.bswap.i64(i64) nounwind readnone