1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -O3 < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64I
4 ; RUN: llc -mtriple=riscv64 -mattr=+zbb,+f -target-abi=lp64f -O3 < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-RV64IF
; Tests that check the optimization which combines
; two comparison operations and a logic operation into
; one select (min/max) operation and one comparison.
11 ; 4 patterns below will be converted to umin+less.
12 define i1 @ulo(i64 %c, i64 %a, i64 %b) {
15 ; CHECK-NEXT: minu a1, a1, a2
16 ; CHECK-NEXT: sltu a0, a1, a0
18 %l0 = icmp ult i64 %a, %c
19 %l1 = icmp ult i64 %b, %c
24 define i1 @ulo_swap1(i64 %c, i64 %a, i64 %b) {
25 ; CHECK-LABEL: ulo_swap1:
27 ; CHECK-NEXT: minu a1, a1, a2
28 ; CHECK-NEXT: sltu a0, a1, a0
30 %l0 = icmp ugt i64 %c, %a
31 %l1 = icmp ult i64 %b, %c
36 define i1 @ulo_swap2(i64 %c, i64 %a, i64 %b) {
37 ; CHECK-LABEL: ulo_swap2:
39 ; CHECK-NEXT: minu a1, a1, a2
40 ; CHECK-NEXT: sltu a0, a1, a0
42 %l0 = icmp ult i64 %a, %c
43 %l1 = icmp ugt i64 %c, %b
48 define i1 @ulo_swap12(i64 %c, i64 %a, i64 %b) {
49 ; CHECK-LABEL: ulo_swap12:
51 ; CHECK-NEXT: minu a1, a1, a2
52 ; CHECK-NEXT: sltu a0, a1, a0
54 %l0 = icmp ugt i64 %c, %a
55 %l1 = icmp ugt i64 %c, %b
60 ; 4 patterns below will be converted to umax+less.
61 define i1 @ula(i64 %c, i64 %a, i64 %b) {
64 ; CHECK-NEXT: maxu a1, a1, a2
65 ; CHECK-NEXT: sltu a0, a1, a0
67 %l0 = icmp ult i64 %a, %c
68 %l1 = icmp ult i64 %b, %c
69 %res = and i1 %l0, %l1
73 define i1 @ula_swap1(i64 %c, i64 %a, i64 %b) {
74 ; CHECK-LABEL: ula_swap1:
76 ; CHECK-NEXT: maxu a1, a1, a2
77 ; CHECK-NEXT: sltu a0, a1, a0
79 %l0 = icmp ugt i64 %c, %a
80 %l1 = icmp ult i64 %b, %c
81 %res = and i1 %l0, %l1
85 define i1 @ula_swap2(i64 %c, i64 %a, i64 %b) {
86 ; CHECK-LABEL: ula_swap2:
88 ; CHECK-NEXT: maxu a1, a1, a2
89 ; CHECK-NEXT: sltu a0, a1, a0
91 %l0 = icmp ult i64 %a, %c
92 %l1 = icmp ugt i64 %c, %b
93 %res = and i1 %l0, %l1
97 define i1 @ula_swap12(i64 %c, i64 %a, i64 %b) {
98 ; CHECK-LABEL: ula_swap12:
100 ; CHECK-NEXT: maxu a1, a1, a2
101 ; CHECK-NEXT: sltu a0, a1, a0
103 %l0 = icmp ugt i64 %c, %a
104 %l1 = icmp ugt i64 %c, %b
105 %res = and i1 %l0, %l1
; 4 patterns below will be converted to umax+greater
; (the greater-than is emitted as sltu with swapped operands).
111 define i1 @ugo(i64 %c, i64 %a, i64 %b) {
114 ; CHECK-NEXT: maxu a1, a1, a2
115 ; CHECK-NEXT: sltu a0, a0, a1
117 %l0 = icmp ugt i64 %a, %c
118 %l1 = icmp ugt i64 %b, %c
119 %res = or i1 %l0, %l1
123 define i1 @ugo_swap1(i64 %c, i64 %a, i64 %b) {
124 ; CHECK-LABEL: ugo_swap1:
126 ; CHECK-NEXT: maxu a1, a1, a2
127 ; CHECK-NEXT: sltu a0, a0, a1
129 %l0 = icmp ult i64 %c, %a
130 %l1 = icmp ugt i64 %b, %c
131 %res = or i1 %l0, %l1
135 define i1 @ugo_swap2(i64 %c, i64 %a, i64 %b) {
136 ; CHECK-LABEL: ugo_swap2:
138 ; CHECK-NEXT: maxu a1, a1, a2
139 ; CHECK-NEXT: sltu a0, a0, a1
141 %l0 = icmp ugt i64 %a, %c
142 %l1 = icmp ult i64 %c, %b
143 %res = or i1 %l0, %l1
147 define i1 @ugo_swap12(i64 %c, i64 %a, i64 %b) {
148 ; CHECK-LABEL: ugo_swap12:
150 ; CHECK-NEXT: maxu a1, a1, a2
151 ; CHECK-NEXT: sltu a0, a0, a1
153 %l0 = icmp ult i64 %c, %a
154 %l1 = icmp ult i64 %c, %b
155 %res = or i1 %l0, %l1
; Pattern below will be converted to umin+greater-or-equal
; (emitted as sltu with swapped operands followed by xori).
161 define i1 @ugea(i64 %c, i64 %a, i64 %b) {
164 ; CHECK-NEXT: minu a1, a1, a2
165 ; CHECK-NEXT: sltu a0, a1, a0
166 ; CHECK-NEXT: xori a0, a0, 1
168 %l0 = icmp uge i64 %a, %c
169 %l1 = icmp uge i64 %b, %c
170 %res = and i1 %l0, %l1
; Pattern below will be converted to umin+greater
; (the greater-than is emitted as sltu with swapped operands).
176 define i1 @uga(i64 %c, i64 %a, i64 %b) {
179 ; CHECK-NEXT: minu a1, a1, a2
180 ; CHECK-NEXT: sltu a0, a0, a1
182 %l0 = icmp ugt i64 %a, %c
183 %l1 = icmp ugt i64 %b, %c
184 %res = and i1 %l0, %l1
188 ; Patterns below will be converted to smax+less.
190 define i1 @sla(i64 %c, i64 %a, i64 %b) {
193 ; CHECK-NEXT: max a1, a1, a2
194 ; CHECK-NEXT: slt a0, a1, a0
196 %l0 = icmp slt i64 %a, %c
197 %l1 = icmp slt i64 %b, %c
198 %res = and i1 %l0, %l1
204 define i1 @flo(float %c, float %a, float %b) {
205 ; CHECK-RV64I-LABEL: flo:
206 ; CHECK-RV64I: # %bb.0:
207 ; CHECK-RV64I-NEXT: addi sp, sp, -32
208 ; CHECK-RV64I-NEXT: .cfi_def_cfa_offset 32
209 ; CHECK-RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
210 ; CHECK-RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
211 ; CHECK-RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
212 ; CHECK-RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
213 ; CHECK-RV64I-NEXT: .cfi_offset ra, -8
214 ; CHECK-RV64I-NEXT: .cfi_offset s0, -16
215 ; CHECK-RV64I-NEXT: .cfi_offset s1, -24
216 ; CHECK-RV64I-NEXT: .cfi_offset s2, -32
217 ; CHECK-RV64I-NEXT: mv s0, a2
218 ; CHECK-RV64I-NEXT: mv s1, a0
219 ; CHECK-RV64I-NEXT: mv a0, a1
220 ; CHECK-RV64I-NEXT: mv a1, s1
221 ; CHECK-RV64I-NEXT: call __gesf2
222 ; CHECK-RV64I-NEXT: mv s2, a0
223 ; CHECK-RV64I-NEXT: mv a0, s0
224 ; CHECK-RV64I-NEXT: mv a1, s1
225 ; CHECK-RV64I-NEXT: call __gesf2
226 ; CHECK-RV64I-NEXT: or a0, s2, a0
227 ; CHECK-RV64I-NEXT: slti a0, a0, 0
228 ; CHECK-RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
229 ; CHECK-RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
230 ; CHECK-RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
231 ; CHECK-RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
232 ; CHECK-RV64I-NEXT: addi sp, sp, 32
233 ; CHECK-RV64I-NEXT: ret
235 ; CHECK-RV64IF-LABEL: flo:
236 ; CHECK-RV64IF: # %bb.0:
237 ; CHECK-RV64IF-NEXT: fle.s a0, fa0, fa1
238 ; CHECK-RV64IF-NEXT: fle.s a1, fa0, fa2
239 ; CHECK-RV64IF-NEXT: and a0, a0, a1
240 ; CHECK-RV64IF-NEXT: xori a0, a0, 1
241 ; CHECK-RV64IF-NEXT: ret
242 %l0 = fcmp ult float %a, %c
243 %l1 = fcmp ult float %b, %c
244 %res = or i1 %l0, %l1
250 define i1 @dlo(double %c, double %a, double %b) {
253 ; CHECK-NEXT: addi sp, sp, -32
254 ; CHECK-NEXT: .cfi_def_cfa_offset 32
255 ; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
256 ; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
257 ; CHECK-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
258 ; CHECK-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
259 ; CHECK-NEXT: .cfi_offset ra, -8
260 ; CHECK-NEXT: .cfi_offset s0, -16
261 ; CHECK-NEXT: .cfi_offset s1, -24
262 ; CHECK-NEXT: .cfi_offset s2, -32
263 ; CHECK-NEXT: mv s0, a2
264 ; CHECK-NEXT: mv s1, a0
265 ; CHECK-NEXT: mv a0, a1
266 ; CHECK-NEXT: mv a1, s1
267 ; CHECK-NEXT: call __gedf2
268 ; CHECK-NEXT: mv s2, a0
269 ; CHECK-NEXT: mv a0, s0
270 ; CHECK-NEXT: mv a1, s1
271 ; CHECK-NEXT: call __gedf2
272 ; CHECK-NEXT: or a0, s2, a0
273 ; CHECK-NEXT: slti a0, a0, 0
274 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
275 ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
276 ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
277 ; CHECK-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
278 ; CHECK-NEXT: addi sp, sp, 32
280 %l0 = fcmp ult double %a, %c
281 %l1 = fcmp ult double %b, %c
282 %res = or i1 %l0, %l1
288 define i1 @multi_user(i64 %c, i64 %a, i64 %b) {
289 ; CHECK-LABEL: multi_user:
291 ; CHECK-NEXT: sltu a1, a1, a0
292 ; CHECK-NEXT: sltu a0, a2, a0
293 ; CHECK-NEXT: or a0, a1, a0
294 ; CHECK-NEXT: and a0, a1, a0
296 %l0 = icmp ugt i64 %c, %a
297 %l1 = icmp ult i64 %b, %c
298 %res = or i1 %l0, %l1
300 %out = and i1 %l0, %res
; Negative test: the two comparisons are not in the same direction
; against %c, so the min/max combine does not apply.
306 define i1 @no_same_ops(i64 %c, i64 %a, i64 %b) {
307 ; CHECK-LABEL: no_same_ops:
309 ; CHECK-NEXT: sltu a1, a0, a1
310 ; CHECK-NEXT: sltu a0, a2, a0
311 ; CHECK-NEXT: or a0, a1, a0
313 %l0 = icmp ult i64 %c, %a
314 %l1 = icmp ugt i64 %c, %b
315 %res = or i1 %l0, %l1