1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32,NOZBS32
4 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64,NOZBS64
6 ; RUN: llc -mtriple=riscv32 -mattr=+zbb,+zbs -verify-machineinstrs < %s \
7 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32,ZBS
8 ; RUN: llc -mtriple=riscv64 -mattr=+zbb,+zbs -verify-machineinstrs < %s \
9 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64,ZBS
; Test: i32 AND with 0xABCDEFFF (-1412567041). The inverted mask 0x54321000
; is a single lui (344865 << 12), so Zbb andn is selected instead of
; materializing the constant with lui+addi and using a plain and.
11 define i32 @and0xabcdefff(i32 %x) {
12 ; CHECK-LABEL: and0xabcdefff:
14 ; CHECK-NEXT: lui a1, 344865
15 ; CHECK-NEXT: andn a0, a0, a1
17 %and = and i32 %x, -1412567041
; Test: i32 OR with the low 13 bits set (0x1FFF). The complement 0xFFFFE000
; is a single lui (1048574 << 12), so Zbb orn is used.
21 define i32 @orlow13(i32 %x) {
22 ; CHECK-LABEL: orlow13:
24 ; CHECK-NEXT: lui a1, 1048574
25 ; CHECK-NEXT: orn a0, a0, a1
; Test: i64 OR with the low 24 bits set (0xFFFFFF = 16777215). The complement
; 0xFF000000 is a single lui (1044480 << 12), so orn is used. On RV32 only the
; low register of the i64 pair is affected; the high half needs no OR.
31 define i64 @orlow24(i64 %x) {
32 ; RV32-LABEL: orlow24:
34 ; RV32-NEXT: lui a2, 1044480
35 ; RV32-NEXT: orn a0, a0, a2
38 ; RV64-LABEL: orlow24:
40 ; RV64-NEXT: lui a1, 1044480
41 ; RV64-NEXT: orn a0, a0, a1
43 %or = or i64 %x, 16777215
; Test: i32 XOR with 0xFFFF (65535). The complement 0xFFFF0000 is a single
; lui (1048560 << 12), so Zbb xnor is used (xnor(x, ~c) == xor(x, c)).
47 define i32 @xorlow16(i32 %x) {
48 ; CHECK-LABEL: xorlow16:
50 ; CHECK-NEXT: lui a1, 1048560
51 ; CHECK-NEXT: xnor a0, a0, a1
53 %xor = xor i32 %x, 65535
; Test: i32 XOR with 0x7FFFFFFF (2147483647). The complement 0x80000000 is a
; single lui (524288 << 12), so Zbb xnor is used.
57 define i32 @xorlow31(i32 %x) {
58 ; CHECK-LABEL: xorlow31:
60 ; CHECK-NEXT: lui a1, 524288
61 ; CHECK-NEXT: xnor a0, a0, a1
63 %xor = xor i32 %x, 2147483647
; Test: OR then ADD with the same constant 0xFFFF. Because the constant is
; used twice, it is cheaper to materialize it once (lui+addi) and reuse the
; register than to fold the OR into orn; expect plain or/add. RV64 uses addw
; for the 32-bit add semantics.
67 define i32 @oraddlow16(i32 %x) {
68 ; RV32-LABEL: oraddlow16:
70 ; RV32-NEXT: lui a1, 16
71 ; RV32-NEXT: addi a1, a1, -1
72 ; RV32-NEXT: or a0, a0, a1
73 ; RV32-NEXT: add a0, a0, a1
76 ; RV64-LABEL: oraddlow16:
78 ; RV64-NEXT: lui a1, 16
79 ; RV64-NEXT: addi a1, a1, -1
80 ; RV64-NEXT: or a0, a0, a1
81 ; RV64-NEXT: addw a0, a0, a1
83 %or = or i32 %x, 65535
84 %add = add nsw i32 %or, 65535
; Test: ADD then OR with the same constant 0xFFFF (mirror of oraddlow16).
; The shared constant is materialized once and reused, so no orn is formed.
; RV64 uses addiw/addw to keep 32-bit wrap-around semantics.
88 define i32 @addorlow16(i32 %x) {
89 ; RV32-LABEL: addorlow16:
91 ; RV32-NEXT: lui a1, 16
92 ; RV32-NEXT: addi a1, a1, -1
93 ; RV32-NEXT: add a0, a0, a1
94 ; RV32-NEXT: or a0, a0, a1
97 ; RV64-LABEL: addorlow16:
99 ; RV64-NEXT: lui a1, 16
100 ; RV64-NEXT: addiw a1, a1, -1
101 ; RV64-NEXT: addw a0, a0, a1
102 ; RV64-NEXT: or a0, a0, a1
104 %add = add nsw i32 %x, 65535
105 %or = or i32 %add, 65535
; Test: (x & 0xFFFF) ^ 0xFFFF folds to ~x & 0xFFFF, i.e. a single Zbb
; andn with the mask as the first source: andn a0, a1, a0.
109 define i32 @andxorlow16(i32 %x) {
110 ; RV32-LABEL: andxorlow16:
112 ; RV32-NEXT: lui a1, 16
113 ; RV32-NEXT: addi a1, a1, -1
114 ; RV32-NEXT: andn a0, a1, a0
117 ; RV64-LABEL: andxorlow16:
119 ; RV64-NEXT: lui a1, 16
120 ; RV64-NEXT: addiw a1, a1, -1
121 ; RV64-NEXT: andn a0, a1, a0
123 %and = and i32 %x, 65535
124 %xor = xor i32 %and, 65535
; Test: loop OR-ing 0xFFFF into each of 100 i32 array elements. The inverted
; mask 0xFFFF0000 (lui 1048560) is hoisted out of the loop and orn is used in
; the body. RV32 tracks the i64 induction variable in a register pair
; (seqz/add form the carry into a2); RV64 rewrites it to an end-pointer
; compare (a1 = a + 400).
128 define void @orarray100(ptr %a) {
129 ; RV32-LABEL: orarray100:
130 ; RV32: # %bb.0: # %entry
131 ; RV32-NEXT: li a1, 0
132 ; RV32-NEXT: li a2, 0
133 ; RV32-NEXT: lui a3, 1048560
134 ; RV32-NEXT: .LBB8_1: # %for.body
135 ; RV32-NEXT: # =>This Inner Loop Header: Depth=1
136 ; RV32-NEXT: slli a4, a1, 2
137 ; RV32-NEXT: addi a1, a1, 1
138 ; RV32-NEXT: add a4, a0, a4
139 ; RV32-NEXT: lw a5, 0(a4)
140 ; RV32-NEXT: seqz a6, a1
141 ; RV32-NEXT: add a2, a2, a6
142 ; RV32-NEXT: xori a6, a1, 100
143 ; RV32-NEXT: orn a5, a5, a3
144 ; RV32-NEXT: or a6, a6, a2
145 ; RV32-NEXT: sw a5, 0(a4)
146 ; RV32-NEXT: bnez a6, .LBB8_1
147 ; RV32-NEXT: # %bb.2: # %for.cond.cleanup
150 ; RV64-LABEL: orarray100:
151 ; RV64: # %bb.0: # %entry
152 ; RV64-NEXT: addi a1, a0, 400
153 ; RV64-NEXT: lui a2, 1048560
154 ; RV64-NEXT: .LBB8_1: # %for.body
155 ; RV64-NEXT: # =>This Inner Loop Header: Depth=1
156 ; RV64-NEXT: lw a3, 0(a0)
157 ; RV64-NEXT: orn a3, a3, a2
158 ; RV64-NEXT: sw a3, 0(a0)
159 ; RV64-NEXT: addi a0, a0, 4
160 ; RV64-NEXT: bne a0, a1, .LBB8_1
161 ; RV64-NEXT: # %bb.2: # %for.cond.cleanup
170 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
171 %arrayidx = getelementptr inbounds nuw i32, ptr %a, i64 %indvars.iv
172 %1 = load i32, ptr %arrayidx, align 4
173 %or = or i32 %1, 65535
174 store i32 %or, ptr %arrayidx, align 4
175 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
176 %exitcond.not = icmp eq i64 %indvars.iv.next, 100
177 br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
; Test: fully unrolled OR of 0xFFFF into three consecutive i32 elements.
; The inverted mask (lui 1048560 = 0xFFFF0000) is materialized once and
; shared by all three orn instructions.
180 define void @orarray3(ptr %a) {
181 ; CHECK-LABEL: orarray3:
183 ; CHECK-NEXT: lw a1, 0(a0)
184 ; CHECK-NEXT: lw a2, 4(a0)
185 ; CHECK-NEXT: lw a3, 8(a0)
186 ; CHECK-NEXT: lui a4, 1048560
187 ; CHECK-NEXT: orn a1, a1, a4
188 ; CHECK-NEXT: orn a2, a2, a4
189 ; CHECK-NEXT: orn a3, a3, a4
190 ; CHECK-NEXT: sw a1, 0(a0)
191 ; CHECK-NEXT: sw a2, 4(a0)
192 ; CHECK-NEXT: sw a3, 8(a0)
194 %1 = load i32, ptr %a, align 4
195 %or = or i32 %1, 65535
196 store i32 %or, ptr %a, align 4
197 %arrayidx.1 = getelementptr inbounds nuw i8, ptr %a, i64 4
198 %2 = load i32, ptr %arrayidx.1, align 4
199 %or.1 = or i32 %2, 65535
200 store i32 %or.1, ptr %arrayidx.1, align 4
201 %arrayidx.2 = getelementptr inbounds nuw i8, ptr %a, i64 8
202 %3 = load i32, ptr %arrayidx.2, align 4
203 %or.2 = or i32 %3, 65535
204 store i32 %or.2, ptr %arrayidx.2, align 4
; Test: AND with 0xFFFF is a 16-bit zero-extend, so Zbb zext.h is selected
; rather than any mask materialization.
208 define i32 @andlow16(i32 %x) {
209 ; CHECK-LABEL: andlow16:
211 ; CHECK-NEXT: zext.h a0, a0
213 %and = and i32 %x, 65535
; Test: AND with 0xFFFFFF (low 24 bits) is done as a shift pair that clears
; the high bits: slli/srli by 8 on RV32 and by 40 on RV64 (64-bit regs).
217 define i32 @andlow24(i32 %x) {
218 ; RV32-LABEL: andlow24:
220 ; RV32-NEXT: slli a0, a0, 8
221 ; RV32-NEXT: srli a0, a0, 8
224 ; RV64-LABEL: andlow24:
226 ; RV64-NEXT: slli a0, a0, 40
227 ; RV64-NEXT: srli a0, a0, 40
229 %and = and i32 %x, 16777215
; Test: XOR with -1 is a plain bitwise complement; expect the `not` alias.
233 define i32 @compl(i32 %x) {
234 ; CHECK-LABEL: compl:
236 ; CHECK-NEXT: not a0, a0
238 %not = xor i32 %x, -1
; Test: OR with 0xFFF (4095), one bit past the 11-bit positive ori range.
; Without Zbs: complement 0xFFFFF000 is one lui, so orn is used.
; With Zbs: ori sets the low 11 bits (2047) and bseti sets bit 11.
242 define i32 @orlow12(i32 %x) {
243 ; NOZBS32-LABEL: orlow12:
245 ; NOZBS32-NEXT: lui a1, 1048575
246 ; NOZBS32-NEXT: orn a0, a0, a1
249 ; NOZBS64-LABEL: orlow12:
251 ; NOZBS64-NEXT: lui a1, 1048575
252 ; NOZBS64-NEXT: orn a0, a0, a1
255 ; ZBS-LABEL: orlow12:
257 ; ZBS-NEXT: ori a0, a0, 2047
258 ; ZBS-NEXT: bseti a0, a0, 11
260 %or = or i32 %x, 4095
; Test: XOR with 0xFFF (4095), mirror of orlow12.
; Without Zbs: xnor against the single-lui complement 0xFFFFF000.
; With Zbs: xori flips the low 11 bits (2047) and binvi flips bit 11.
264 define i32 @xorlow12(i32 %x) {
265 ; NOZBS32-LABEL: xorlow12:
267 ; NOZBS32-NEXT: lui a1, 1048575
268 ; NOZBS32-NEXT: xnor a0, a0, a1
271 ; NOZBS64-LABEL: xorlow12:
273 ; NOZBS64-NEXT: lui a1, 1048575
274 ; NOZBS64-NEXT: xnor a0, a0, a1
277 ; ZBS-LABEL: xorlow12:
279 ; ZBS-NEXT: xori a0, a0, 2047
280 ; ZBS-NEXT: binvi a0, a0, 11
282 %xor = xor i32 %x, 4095
; Test: i64 AND with 0xFF00FFFF (4278255615). On RV32 the low word uses andn
; with the single-lui inverse 0x00FF0000 (lui 4080) and the high word is
; zeroed with li. On RV64 the inverse mask 0xFFFFFFFF00FF0000 is built with
; lui 983295 (sign-extended) + slli 4, then andn.
287 define i64 @andimm64(i64 %x) {
288 ; RV32-LABEL: andimm64:
289 ; RV32-NEXT: lui a1, 4080
290 ; RV32-NEXT: andn a0, a0, a1
291 ; RV32-NEXT: li a1, 0
294 ; RV64-LABEL: andimm64:
296 ; RV64-NEXT: lui a1, 983295
297 ; RV64-NEXT: slli a1, a1, 4
298 ; RV64-NEXT: andn a0, a0, a1
300 %and = and i64 %x, 4278255615
; Test: despite the "and" in the name, the IR is an i64 OR with
; 0xE000000001FFFFFF (-2305843009180139521); the name reflects the
; lui+srli constant materialization being exercised. RV64 builds the inverse
; mask 0x1FFFFFFFFE000000 with lui 983040 (sign-extended) + srli 3, then orn.
; RV32 splits the pair: low word via orn with lui 1040384 (inverse
; 0x01FFFFFF), high word via plain or with lui 917504 (0xE0000000).
; NOTE(review): function body continues past this excerpt.
304 define i64 @andimm64srli(i64 %x) {
305 ; RV32-LABEL: andimm64srli:
307 ; RV32-NEXT: lui a2, 1040384
308 ; RV32-NEXT: orn a0, a0, a2
309 ; RV32-NEXT: lui a2, 917504
310 ; RV32-NEXT: or a1, a1, a2
313 ; RV64-LABEL: andimm64srli:
315 ; RV64-NEXT: lui a1, 983040
316 ; RV64-NEXT: srli a1, a1, 3
317 ; RV64-NEXT: orn a0, a0, a1
319 %or = or i64 %x, -2305843009180139521