1 ;; Machine description for RISC-V for GNU compiler.
2 ;; Copyright (C) 2011-2025 Free Software Foundation, Inc.
3 ;; Contributed by Andrew Waterman (andrew@sifive.com).
4 ;; Based on MIPS target for GNU compiler.
6 ;; This file is part of GCC.
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 3, or (at your option)
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING3. If not see
20 ;; <http://www.gnu.org/licenses/>.
23 ;; Keep this list and the one above riscv_print_operand in sync.
24 ;; The special asm out single letter directives following a '%' are:
25 ;; h -- Print the high-part relocation associated with OP, after stripping
26 ;; any outermost HIGH.
27 ;; R -- Print the low-part relocation associated with OP.
28 ;; C -- Print the integer branch condition for comparison OP.
29 ;; A -- Print the atomic operation suffix for memory model OP.
30 ;; F -- Print a FENCE if the memory model requires a release.
31 ;; z -- Print x0 if OP is zero, otherwise print OP normally.
32 ;; i -- Print i if the operand is not a register.
33 ;; S -- Print shift-index of single-bit mask OP.
34 ;; T -- Print shift-index of inverted single-bit mask OP.
35 ;; ~ -- Print w if TARGET_64BIT is true; otherwise print nothing.
37 (define_c_enum "unspec" [
38 ;; Override return address for exception handling.
41 ;; Symbolic accesses. The order of this list must match that of
42 ;; enum riscv_symbol_type in riscv-protos.h.
52 ;; High part of PC-relative address.
55 ;; Floating-point unspecs.
86 ;; the calling convention of callee
92 ;; Workaround for HFmode and BFmode without hardware extension
104 (define_c_enum "unspecv" [
105 ;; Register save and restore.
109 ;; Floating-point unspecs.
116 ;; Interrupt handler instructions.
121 ;; Blockage and synchronization.
126 ;; Stack Smash Protector
137 ;; Zihintpause unspec
153 UNSPECV_XTHEADINT_PUSH
154 UNSPECV_XTHEADINT_POP
158 [(RETURN_ADDR_REGNUM 1)
189 (include "predicates.md")
190 (include "constraints.md")
191 (include "iterators.md")
193 ;; ....................
197 ;; ....................
;; Whether this pattern accesses the GOT (global offset table):
;; "unset" is the default, "load" marks a GOT load, and "xgot_high"
;; marks the high-part of an extended-GOT access.
199 (define_attr "got" "unset,xgot_high,load"
200 (const_string "unset"))
202 ;; Classification of moves, extensions and truncations. Most values
203 ;; are as for "type" (see below) but there are also the following
204 ;; move-specific values:
206 ;; andi a single ANDI instruction
207 ;; shift_shift a shift left followed by a shift right
209 ;; This attribute is used to determine the instruction's length and
210 ;; scheduling type. For doubleword moves, the attribute always describes
211 ;; the split instructions; in some cases, it is more appropriate for the
212 ;; scheduling type to be "multi" instead.
;; See the comment block immediately above for the meaning of the
;; move-specific values; the remaining values mirror the "type"
;; attribute.
213 (define_attr "move_type"
214 "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
215 const,logical,arith,andi,shift_shift,rdvlenb"
216 (const_string "unknown"))
218 ;; Main data type used by the insn
219 (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,BF,SF,DF,TF,
220 RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,
221 RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,
222 RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,
223 RVVM8BF,RVVM4BF,RVVM2BF,RVVM1BF,RVVMF2BF,RVVMF4BF,
224 RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,
225 RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,
226 RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,
227 RVVM8DI,RVVM4DI,RVVM2DI,RVVM1DI,
228 RVVM8DF,RVVM4DF,RVVM2DF,RVVM1DF,
229 RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,
230 RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,
231 RVVM1x6QI,RVVMF2x6QI,RVVMF4x6QI,RVVMF8x6QI,
232 RVVM1x5QI,RVVMF2x5QI,RVVMF4x5QI,RVVMF8x5QI,
233 RVVM2x4QI,RVVM1x4QI,RVVMF2x4QI,RVVMF4x4QI,RVVMF8x4QI,
234 RVVM2x3QI,RVVM1x3QI,RVVMF2x3QI,RVVMF4x3QI,RVVMF8x3QI,
235 RVVM4x2QI,RVVM2x2QI,RVVM1x2QI,RVVMF2x2QI,RVVMF4x2QI,RVVMF8x2QI,
236 RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,
237 RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,
238 RVVM1x6HI,RVVMF2x6HI,RVVMF4x6HI,
239 RVVM1x5HI,RVVMF2x5HI,RVVMF4x5HI,
240 RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,
241 RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,
242 RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,
243 RVVM1x8BF,RVVMF2x8BF,RVVMF4x8BF,RVVM1x7BF,RVVMF2x7BF,
244 RVVMF4x7BF,RVVM1x6BF,RVVMF2x6BF,RVVMF4x6BF,RVVM1x5BF,
245 RVVMF2x5BF,RVVMF4x5BF,RVVM2x4BF,RVVM1x4BF,RVVMF2x4BF,
246 RVVMF4x4BF,RVVM2x3BF,RVVM1x3BF,RVVMF2x3BF,RVVMF4x3BF,
247 RVVM4x2BF,RVVM2x2BF,RVVM1x2BF,RVVMF2x2BF,RVVMF4x2BF,
248 RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,RVVM1x7HF,RVVMF2x7HF,
249 RVVMF4x7HF,RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,RVVM1x5HF,
250 RVVMF2x5HF,RVVMF4x5HF,RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,
251 RVVMF4x4HF,RVVM2x3HF,RVVM1x3HF,RVVMF2x3HF,RVVMF4x3HF,
252 RVVM4x2HF,RVVM2x2HF,RVVM1x2HF,RVVMF2x2HF,RVVMF4x2HF,
253 RVVM1x8SI,RVVMF2x8SI,
254 RVVM1x7SI,RVVMF2x7SI,
255 RVVM1x6SI,RVVMF2x6SI,
256 RVVM1x5SI,RVVMF2x5SI,
257 RVVM2x4SI,RVVM1x4SI,RVVMF2x4SI,
258 RVVM2x3SI,RVVM1x3SI,RVVMF2x3SI,
259 RVVM4x2SI,RVVM2x2SI,RVVM1x2SI,RVVMF2x2SI,
260 RVVM1x8SF,RVVMF2x8SF,RVVM1x7SF,RVVMF2x7SF,
261 RVVM1x6SF,RVVMF2x6SF,RVVM1x5SF,RVVMF2x5SF,
262 RVVM2x4SF,RVVM1x4SF,RVVMF2x4SF,RVVM2x3SF,
263 RVVM1x3SF,RVVMF2x3SF,RVVM4x2SF,RVVM2x2SF,
264 RVVM1x2SF,RVVMF2x2SF,
265 RVVM1x8DI,RVVM1x7DI,RVVM1x6DI,RVVM1x5DI,
266 RVVM2x4DI,RVVM1x4DI,RVVM2x3DI,RVVM1x3DI,
267 RVVM4x2DI,RVVM2x2DI,RVVM1x2DI,RVVM1x8DF,
268 RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,RVVM2x4DF,
269 RVVM1x4DF,RVVM2x3DF,RVVM1x3DF,RVVM4x2DF,
271 V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,
272 V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,
273 V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,
274 V1DI,V2DI,V4DI,V8DI,V16DI,V32DI,V64DI,V128DI,V256DI,V512DI,
275 V1HF,V2HF,V4HF,V8HF,V16HF,V32HF,V64HF,V128HF,V256HF,V512HF,V1024HF,V2048HF,
276 V1SF,V2SF,V4SF,V8SF,V16SF,V32SF,V64SF,V128SF,V256SF,V512SF,V1024SF,
277 V1DF,V2DF,V4DF,V8DF,V16DF,V32DF,V64DF,V128DF,V256DF,V512DF,
278 V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI"
279 (const_string "unknown"))
281 ;; True if the main data type is twice the size of a word.
282 (define_attr "dword_mode" "no,yes"
283 (cond [(and (eq_attr "mode" "DI,DF")
284 (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
287 (and (eq_attr "mode" "TI,TF")
288 (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
289 (const_string "yes")]
290 (const_string "no")))
;; Which ISA extension an insn requires; consumed by the
;; "ext_enabled" attribute below to disable alternatives when the
;; corresponding extension is not available.
293 (define_attr "ext" "base,f,d,vector"
294 (const_string "base"))
296 ;; True if the extension is enabled.
297 (define_attr "ext_enabled" "no,yes"
298 (cond [(eq_attr "ext" "base")
301 (and (eq_attr "ext" "f")
302 (match_test "TARGET_HARD_FLOAT"))
305 (and (eq_attr "ext" "d")
306 (match_test "TARGET_DOUBLE_FLOAT"))
309 (and (eq_attr "ext" "vector")
310 (match_test "TARGET_VECTOR"))
313 (const_string "no")))
315 ;; Classification of each insn.
316 ;; branch conditional branch
317 ;; jump unconditional direct jump
318 ;; jalr unconditional indirect jump
319 ;; ret various returns, no arguments
320 ;; call unconditional call
321 ;; load load instruction(s)
322 ;; fpload floating point load
323 ;; store store instruction(s)
324 ;; fpstore floating point store
325 ;; mtc transfer to coprocessor
326 ;; mfc transfer from coprocessor
327 ;; const load constant
328 ;; arith integer arithmetic instructions
329 ;; logical integer logical instructions
330 ;; shift integer shift instructions
331 ;; slt set less than instructions
332 ;; imul integer multiply
333 ;; idiv integer divide
334 ;; move integer register move (addi rd, rs1, 0)
335 ;; fmove floating point register move
336 ;; fadd floating point add/subtract
337 ;; fmul floating point multiply
338 ;; fmadd floating point multiply-add
339 ;; fdiv floating point divide
340 ;; fcmp floating point compare
341 ;; fcvt floating point convert
342 ;; fcvt_i2f integer to floating point convert
343 ;; fcvt_f2i floating point to integer convert
344 ;; fsqrt floating point square root
345 ;; multi multiword sequence (or user asm statements)
346 ;; auipc integer addition to PC
347 ;; sfb_alu SFB ALU instruction
349 ;; trap trap instruction
350 ;; ghost an instruction that produces no real code
351 ;; bitmanip bit manipulation instructions
352 ;; clmul clmul, clmulh, clmulr
353 ;; rotate rotation instructions
354 ;; atomic atomic instructions
355 ;; condmove conditional moves
356 ;; crypto cryptography instructions
357 ;; mvpair zc move pair instructions
358 ;; zicond zicond instructions
359 ;; Classification of RVV instructions which will be added to each RVV .md pattern and used by scheduler.
360 ;; rdvlenb vector byte length vlenb csrr read
361 ;; rdvl vector length vl csrr read
362 ;; wrvxrm vector fixed-point rounding mode write
363 ;; wrfrm vector floating-point rounding mode write
364 ;; vsetvl vector configuration-setting instructions
365 ;; 7. Vector Loads and Stores
366 ;; vlde vector unit-stride load instructions
367 ;; vste vector unit-stride store instructions
368 ;; vldm vector unit-stride mask load instructions
369 ;; vstm vector unit-stride mask store instructions
370 ;; vlds vector strided load instructions
371 ;; vsts vector strided store instructions
372 ;; vldux vector unordered indexed load instructions
373 ;; vldox vector ordered indexed load instructions
374 ;; vstux vector unordered indexed store instructions
375 ;; vstox vector ordered indexed store instructions
376 ;; vldff vector unit-stride fault-only-first load instructions
377 ;; vldr vector whole register load instructions
378 ;; vstr vector whole register store instructions
379 ;; vlsegde vector segment unit-stride load instructions
380 ;; vssegte vector segment unit-stride store instructions
381 ;; vlsegds vector segment strided load instructions
382 ;; vssegts vector segment strided store instructions
383 ;; vlsegdux vector segment unordered indexed load instructions
384 ;; vlsegdox vector segment ordered indexed load instructions
385 ;; vssegtux vector segment unordered indexed store instructions
386 ;; vssegtox vector segment ordered indexed store instructions
387 ;; vlsegdff vector segment unit-stride fault-only-first load instructions
388 ;; 11. Vector integer arithmetic instructions
389 ;; vialu vector single-width integer add and subtract and logical instructions
390 ;; viwalu vector widening integer add/subtract
391 ;; vext vector integer extension
392 ;; vicalu vector arithmetic with carry or borrow instructions
393 ;; vshift vector single-width bit shift instructions
394 ;; vnshift vector narrowing integer shift instructions
395 ;; viminmax vector integer min/max instructions
396 ;; vicmp vector integer comparison instructions
397 ;; vimul vector single-width integer multiply instructions
398 ;; vidiv vector single-width integer divide instructions
399 ;; viwmul vector widening integer multiply instructions
400 ;; vimuladd vector single-width integer multiply-add instructions
401 ;; viwmuladd vector widening integer multiply-add instructions
402 ;; vimerge vector integer merge instructions
403 ;; vimov vector integer move vector instructions
404 ;; 12. Vector fixed-point arithmetic instructions
405 ;; vsalu vector single-width saturating add and subtract and logical instructions
406 ;; vaalu vector single-width averaging add and subtract and logical instructions
407 ;; vsmul vector single-width fractional multiply with rounding and saturation instructions
408 ;; vsshift vector single-width scaling shift instructions
409 ;; vnclip vector narrowing fixed-point clip instructions
410 ;; 13. Vector floating-point instructions
411 ;; vfalu vector single-width floating-point add/subtract instructions
412 ;; vfwalu vector widening floating-point add/subtract instructions
413 ;; vfmul vector single-width floating-point multiply instructions
414 ;; vfdiv vector single-width floating-point divide instructions
415 ;; vfwmul vector widening floating-point multiply instructions
416 ;; vfmuladd vector single-width floating-point multiply-add instructions
417 ;; vfwmuladd vector widening floating-point multiply-add instructions
418 ;; vfsqrt vector floating-point square-root instructions
419 ;; vfrecp vector floating-point reciprocal square-root instructions
420 ;; vfminmax vector floating-point min/max instructions
421 ;; vfcmp vector floating-point comparison instructions
422 ;; vfsgnj vector floating-point sign-injection instructions
423 ;; vfclass vector floating-point classify instruction
424 ;; vfmerge vector floating-point merge instruction
425 ;; vfmov vector floating-point move instruction
426 ;; vfcvtitof vector single-width integer to floating-point instruction
427 ;; vfcvtftoi vector single-width floating-point to integer instruction
428 ;; vfwcvtitof vector widening integer to floating-point instruction
429 ;; vfwcvtftoi vector widening floating-point to integer instruction
430 ;; vfwcvtftof vector widening floating-point to floating-point instruction
431 ;; vfncvtitof vector narrowing integer to floating-point instruction
432 ;; vfncvtftoi vector narrowing floating-point to integer instruction
433 ;; vfncvtftof vector narrowing floating-point to floating-point instruction
434 ;; 14. Vector reduction operations
435 ;; vired vector single-width integer reduction instructions
436 ;; viwred vector widening integer reduction instructions
437 ;; vfredu vector single-width floating-point un-ordered reduction instruction
438 ;; vfredo vector single-width floating-point ordered reduction instruction
439 ;; vfwredu vector widening floating-point un-ordered reduction instruction
440 ;; vfwredo vector widening floating-point ordered reduction instruction
441 ;; 15. Vector mask instructions
442 ;; vmalu vector mask-register logical instructions
443 ;; vmpop vector mask population count
444 ;; vmffs vector find-first-set mask bit
445 ;; vmsfs vector set mask bit
446 ;; vmiota vector iota
447 ;; vmidx vector element index instruction
448 ;; 16. Vector permutation instructions
449 ;; vimovvx integer scalar move instructions
450 ;; vimovxv integer scalar move instructions
451 ;; vfmovvf floating-point scalar move instructions
452 ;; vfmovfv floating-point scalar move instructions
453 ;; vslideup vector slide instructions
454 ;; vslidedown vector slide instructions
455 ;; vislide1up vector slide instructions
456 ;; vislide1down vector slide instructions
457 ;; vfslide1up vector slide instructions
458 ;; vfslide1down vector slide instructions
459 ;; vgather vector register gather instructions
460 ;; vcompress vector compress instruction
461 ;; vmov whole vector register move
462 ;; vector unknown vector instruction
463 ;; 17. Crypto Vector instructions
464 ;; vandn crypto vector bitwise and-not instructions
465 ;; vbrev crypto vector reverse bits in elements instructions
466 ;; vbrev8 crypto vector reverse bits in bytes instructions
467 ;; vrev8 crypto vector reverse bytes instructions
468 ;; vclz crypto vector count leading zeros instructions
469 ;; vctz crypto vector count trailing zeros instructions
470 ;; vrol crypto vector rotate left instructions
471 ;; vror crypto vector rotate right instructions
472 ;; vwsll crypto vector widening shift left logical instructions
473 ;; vclmul crypto vector carry-less multiply - return low half instructions
474 ;; vclmulh crypto vector carry-less multiply - return high half instructions
475 ;; vghsh crypto vector add-multiply over GHASH Galois-Field instructions
476 ;; vgmul crypto vector multiply over GHASH Galois-Field instructions
477 ;; vaesef crypto vector AES final-round encryption instructions
478 ;; vaesem crypto vector AES middle-round encryption instructions
479 ;; vaesdf crypto vector AES final-round decryption instructions
480 ;; vaesdm crypto vector AES middle-round decryption instructions
481 ;; vaeskf1 crypto vector AES-128 Forward KeySchedule generation instructions
482 ;; vaeskf2 crypto vector AES-256 Forward KeySchedule generation instructions
483 ;; vaesz crypto vector AES round zero encryption/decryption instructions
484 ;; vsha2ms crypto vector SHA-2 message schedule instructions
485 ;; vsha2ch crypto vector SHA-2 two rounds of compression instructions
486 ;; vsha2cl crypto vector SHA-2 two rounds of compression instructions
487 ;; vsm4k crypto vector SM4 KeyExpansion instructions
488 ;; vsm4r crypto vector SM4 Rounds instructions
489 ;; vsm3me crypto vector SM3 Message Expansion instructions
490 ;; vsm3c crypto vector SM3 Compression instructions
491 ;; 18. Vector BF16 instructions
492 ;; vfncvtbf16 vector narrowing single floating-point to brain floating-point instruction
493 ;; vfwcvtbf16 vector widening brain floating-point to single floating-point instruction
494 ;; vfwmaccbf16 vector BF16 widening multiply-accumulate
495 ;; SiFive custom extension instructions
496 ;; sf_vqmacc vector matrix integer multiply-add instructions
497 ;; sf_vfnrclip vector fp32 to int8 ranged clip instructions
499 "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
500 mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
501 fmadd,fdiv,fcmp,fcvt,fcvt_i2f,fcvt_f2i,fsqrt,multi,auipc,sfb_alu,nop,trap,
502 ghost,bitmanip,rotate,clmul,min,max,minu,maxu,clz,ctz,cpop,
503 atomic,condmove,crypto,mvpair,zicond,rdvlenb,rdvl,wrvxrm,wrfrm,
504 rdfrm,vsetvl,vsetvl_pre,vlde,vste,vldm,vstm,vlds,vsts,
505 vldux,vldox,vstux,vstox,vldff,vldr,vstr,
506 vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
507 vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
508 vimul,vidiv,viwmul,vimuladd,sf_vqmacc,viwmuladd,vimerge,vimov,
509 vsalu,vaalu,vsmul,vsshift,vnclip,sf_vfnrclip,
510 vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
511 vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
512 vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
513 vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
514 vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
515 vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
516 vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
517 vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
518 vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
519 vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16"
520 (cond [(eq_attr "got" "load") (const_string "load")
522 ;; If a doubleword move uses these expensive instructions,
523 ;; it is usually better to schedule them in the same way
524 ;; as the singleword form, rather than as "multi".
525 (eq_attr "move_type" "load") (const_string "load")
526 (eq_attr "move_type" "fpload") (const_string "fpload")
527 (eq_attr "move_type" "store") (const_string "store")
528 (eq_attr "move_type" "fpstore") (const_string "fpstore")
529 (eq_attr "move_type" "mtc") (const_string "mtc")
530 (eq_attr "move_type" "mfc") (const_string "mfc")
532 ;; These types of move are always single insns.
533 (eq_attr "move_type" "fmove") (const_string "fmove")
534 (eq_attr "move_type" "arith") (const_string "arith")
535 (eq_attr "move_type" "logical") (const_string "logical")
536 (eq_attr "move_type" "andi") (const_string "logical")
538 ;; These types of move are always split.
539 (eq_attr "move_type" "shift_shift")
540 (const_string "multi")
542 ;; These types of move are split for doubleword modes only.
543 (and (eq_attr "move_type" "move,const")
544 (eq_attr "dword_mode" "yes"))
545 (const_string "multi")
546 (eq_attr "move_type" "move") (const_string "move")
547 (eq_attr "move_type" "const") (const_string "const")
548 (eq_attr "move_type" "rdvlenb") (const_string "rdvlenb")]
549 (const_string "unknown")))
551 ;; True if the float point vector is disabled.
552 (define_attr "fp_vector_disabled" "no,yes"
554 (and (eq_attr "type" "vfmov,vfalu,vfmul,vfdiv,
555 vfwalu,vfwmul,vfmuladd,vfwmuladd,
556 vfsqrt,vfrecp,vfminmax,vfsgnj,vfcmp,
558 vfncvtitof,vfwcvtftoi,vfcvtftoi,vfcvtitof,
559 vfredo,vfredu,vfwredo,vfwredu,
560 vfslide1up,vfslide1down")
561 (and (eq_attr "mode" "RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF")
562 (match_test "!TARGET_ZVFH")))
565 ;; The mode is recorded as QI for the FP16 <=> INT8 instruction.
566 (and (eq_attr "type" "vfncvtftoi,vfwcvtitof")
567 (and (eq_attr "mode" "RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI")
568 (match_test "!TARGET_ZVFH")))
571 (const_string "no")))
573 ;; This attribute marks the alternatives not matching the constraints
574 ;; described in spec as disabled.
;; "thv" restricts an alternative under the T-Head vector extension
;; (TARGET_XTHEADVECTOR); "rvv" restricts it under standard RVV.
575 (define_attr "spec_restriction" "none,thv,rvv"
576 (const_string "none"))
578 (define_attr "spec_restriction_disabled" "no,yes"
579 (cond [(eq_attr "spec_restriction" "none")
582 (and (eq_attr "spec_restriction" "thv")
583 (match_test "TARGET_XTHEADVECTOR"))
586 (and (eq_attr "spec_restriction" "rvv")
587 (match_test "TARGET_VECTOR && !TARGET_XTHEADVECTOR"))
590 (const_string "no")))
592 ;; Attribute to control enable or disable instructions.
593 (define_attr "enabled" "no,yes"
595 (eq_attr "ext_enabled" "no")
598 (eq_attr "fp_vector_disabled" "yes")
601 (eq_attr "spec_restriction_disabled" "yes")
604 (const_string "yes")))
606 ;; Length of instruction in bytes.
607 (define_attr "length" ""
609 ;; Branches further than +/- 1 MiB require three instructions.
610 ;; Branches further than +/- 4 KiB require two instructions.
611 (eq_attr "type" "branch")
612 (if_then_else (and (le (minus (match_dup 0) (pc))
614 (le (minus (pc) (match_dup 0))
617 (if_then_else (and (le (minus (match_dup 0) (pc))
619 (le (minus (pc) (match_dup 0))
620 (const_int 1048572)))
624 ;; Jumps further than +/- 1 MiB require two instructions.
625 (eq_attr "type" "jump")
626 (if_then_else (and (le (minus (match_dup 0) (pc))
628 (le (minus (pc) (match_dup 0))
629 (const_int 1048572)))
633 ;; Conservatively assume calls take two instructions (AUIPC + JALR).
634 ;; The linker will opportunistically relax the sequence to JAL.
635 (eq_attr "type" "call") (const_int 8)
637 ;; "Ghost" instructions occupy no space.
638 (eq_attr "type" "ghost") (const_int 0)
640 (eq_attr "got" "load") (const_int 8)
642 ;; SHIFT_SHIFTs are decomposed into two separate instructions.
643 (eq_attr "move_type" "shift_shift")
646 ;; Check for doubleword moves that are decomposed into two
648 (and (eq_attr "move_type" "mtc,mfc,move")
649 (eq_attr "dword_mode" "yes"))
652 ;; Doubleword CONST{,N} moves are split into two word
654 (and (eq_attr "move_type" "const")
655 (eq_attr "dword_mode" "yes"))
656 (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
658 ;; Otherwise, constants, loads and stores are handled by external
660 (eq_attr "move_type" "load,fpload")
661 (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
662 (eq_attr "move_type" "store,fpstore")
663 (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
666 ;; Is copying of this instruction disallowed?
;; Set to "yes" on insns that must not be duplicated by the compiler.
667 (define_attr "cannot_copy" "no,yes" (const_string "no"))
669 ;; Microarchitectures we know how to tune for.
670 ;; Keep this in sync with enum riscv_microarchitecture.
672 "generic,sifive_7,sifive_p400,sifive_p600,xiangshan,generic_ooo"
673 (const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
675 ;; Describe a user's asm statement.
;; User asm statements are classified as "multi" (multiword
;; sequence), so length/scheduling treat them conservatively.
676 (define_asm_attributes
677 [(set_attr "type" "multi")])
679 ;; Ghost instructions produce no real code and introduce no hazards.
680 ;; They exist purely to express an effect on dataflow.
681 (define_insn_reservation "ghost" 0
682 (eq_attr "type" "ghost")
686 ;; ....................
690 ;; ....................
;; Floating-point addition over the ANYF mode iterator (defined in
;; iterators.md).  Available with hardware floating point or with the
;; Zfinx extension; emits fadd.<fmt> on three FP registers.
693 (define_insn "add<mode>3"
694 [(set (match_operand:ANYF 0 "register_operand" "=f")
695 (plus:ANYF (match_operand:ANYF 1 "register_operand" " f")
696 (match_operand:ANYF 2 "register_operand" " f")))]
697 "TARGET_HARD_FLOAT || TARGET_ZFINX"
698 "fadd.<fmt>\t%0,%1,%2"
699 [(set_attr "type" "fadd")
700 (set_attr "mode" "<UNITMODE>")])
702 (define_insn "*addsi3"
703 [(set (match_operand:SI 0 "register_operand" "=r,r")
704 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
705 (match_operand:SI 2 "arith_operand" " r,I")))]
708 [(set_attr "type" "arith")
709 (set_attr "mode" "SI")])
711 (define_expand "addsi3"
712 [(set (match_operand:SI 0 "register_operand" "=r,r")
713 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
714 (match_operand:SI 2 "arith_operand" " r,I")))]
719 rtx t = gen_reg_rtx (DImode);
720 emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
721 t = gen_lowpart (SImode, t);
722 SUBREG_PROMOTED_VAR_P (t) = 1;
723 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
724 emit_move_insn (operands[0], t);
729 (define_insn "adddi3"
730 [(set (match_operand:DI 0 "register_operand" "=r,r")
731 (plus:DI (match_operand:DI 1 "register_operand" " r,r")
732 (match_operand:DI 2 "arith_operand" " r,I")))]
735 [(set_attr "type" "arith")
736 (set_attr "mode" "DI")])
738 ;; Special case of adding a reg and constant if latter is sum of two S12
739 ;; values (in range -2048 to 2047). Avoid materializing the const and fuse
740 ;; into the add (with an additional add for 2nd value). Turns a 3 insn
741 ;; sequence into 2 insns.
743 (define_insn_and_split "*add<mode>3_const_sum_of_two_s12"
744 [(set (match_operand:P 0 "register_operand" "=r,r")
745 (plus:P (match_operand:P 1 "register_operand" " r,r")
746 (match_operand:P 2 "const_two_s12" " MiG,r")))]
747 "!riscv_reg_frame_related (operands[0])"
749 /* operand matching MiG constraint is always meant to be split. */
750 if (which_alternative == 0)
753 return "add %0,%1,%2";
757 (plus:P (match_dup 1) (match_dup 3)))
759 (plus:P (match_dup 0) (match_dup 4)))]
761 int val = INTVAL (operands[2]);
762 if (SUM_OF_TWO_S12_P (val))
764 operands[3] = GEN_INT (2047);
765 operands[4] = GEN_INT (val - 2047);
767 else if (SUM_OF_TWO_S12_N (val))
769 operands[3] = GEN_INT (-2048);
770 operands[4] = GEN_INT (val + 2048);
775 [(set_attr "type" "arith")
776 (set_attr "mode" "<P:MODE>")])
778 (define_expand "addv<mode>4"
779 [(set (match_operand:GPR 0 "register_operand" "=r,r")
780 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
781 (match_operand:GPR 2 "arith_operand" " r,I")))
782 (label_ref (match_operand 3 "" ""))]
785 if (TARGET_64BIT && <MODE>mode == SImode)
787 rtx t3 = gen_reg_rtx (DImode);
788 rtx t4 = gen_reg_rtx (DImode);
789 rtx t5 = gen_reg_rtx (DImode);
790 rtx t6 = gen_reg_rtx (DImode);
792 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
793 if (GET_CODE (operands[1]) != CONST_INT)
794 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
797 if (GET_CODE (operands[2]) != CONST_INT)
798 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
801 emit_insn (gen_adddi3 (t3, t4, t5));
802 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
804 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
808 rtx t3 = gen_reg_rtx (<MODE>mode);
809 rtx t4 = gen_reg_rtx (<MODE>mode);
811 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
812 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
813 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
814 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[0], operands[1]);
816 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[0], operands[1]));
817 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
822 (define_expand "uaddv<mode>4"
823 [(set (match_operand:GPR 0 "register_operand" "=r,r")
824 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
825 (match_operand:GPR 2 "arith_operand" " r,I")))
826 (label_ref (match_operand 3 "" ""))]
829 if (TARGET_64BIT && <MODE>mode == SImode)
831 rtx t3 = gen_reg_rtx (DImode);
832 rtx t4 = gen_reg_rtx (DImode);
834 if (GET_CODE (operands[1]) != CONST_INT)
835 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
838 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
839 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
841 riscv_expand_conditional_branch (operands[3], LTU, t4, t3);
845 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
846 riscv_expand_conditional_branch (operands[3], LTU, operands[0],
853 (define_insn "addsi3_extended"
854 [(set (match_operand:DI 0 "register_operand" "=r,r")
856 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
857 (match_operand:SI 2 "arith_operand" " r,I"))))]
860 [(set_attr "type" "arith")
861 (set_attr "mode" "SI")])
863 (define_insn "*addsi3_extended2"
864 [(set (match_operand:DI 0 "register_operand" "=r,r")
866 (match_operator:SI 3 "subreg_lowpart_operator"
867 [(plus:DI (match_operand:DI 1 "register_operand" " r,r")
868 (match_operand:DI 2 "arith_operand" " r,I"))])))]
871 [(set_attr "type" "arith")
872 (set_attr "mode" "SI")])
874 ;; Transform (X & C1) + C2 into (X | ~C1) - (-C2 | ~C1)
875 ;; Where C1 is not a LUI operand, but ~C1 is a LUI operand
877 (define_insn_and_split "*lui_constraint<X:mode>_and_to_or"
878 [(set (match_operand:X 0 "register_operand" "=r")
879 (plus:X (and:X (match_operand:X 1 "register_operand" "r")
880 (match_operand 2 "const_int_operand"))
881 (match_operand 3 "const_int_operand")))
882 (clobber (match_scratch:X 4 "=&r"))]
883 "(LUI_OPERAND (~INTVAL (operands[2]))
884 && ((INTVAL (operands[2]) & (-INTVAL (operands[3])))
885 == (-INTVAL (operands[3])))
886 && riscv_const_insns (operands[3], false)
887 && (riscv_const_insns (GEN_INT (~INTVAL (operands[2])
888 | -INTVAL (operands[3])), false)
889 <= riscv_const_insns (operands[3], false)))"
891 "&& reload_completed"
892 [(set (match_dup 4) (match_dup 5))
893 (set (match_dup 0) (ior:X (match_dup 1) (match_dup 4)))
894 (set (match_dup 4) (match_dup 6))
895 (set (match_dup 0) (minus:X (match_dup 0) (match_dup 4)))]
897 operands[5] = GEN_INT (~INTVAL (operands[2]));
898 operands[6] = GEN_INT ((~INTVAL (operands[2])) | (-INTVAL (operands[3])));
900 [(set_attr "type" "arith")])
903 ;; ....................
907 ;; ....................
;; Floating-point subtraction over the ANYF mode iterator (defined in
;; iterators.md).  Available with hardware floating point or with the
;; Zfinx extension; emits fsub.<fmt> on three FP registers.  Note the
;; scheduling type is "fadd", matching FP add/subtract.
910 (define_insn "sub<mode>3"
911 [(set (match_operand:ANYF 0 "register_operand" "=f")
912 (minus:ANYF (match_operand:ANYF 1 "register_operand" " f")
913 (match_operand:ANYF 2 "register_operand" " f")))]
914 "TARGET_HARD_FLOAT || TARGET_ZFINX"
915 "fsub.<fmt>\t%0,%1,%2"
916 [(set_attr "type" "fadd")
917 (set_attr "mode" "<UNITMODE>")])
919 (define_insn "subdi3"
920 [(set (match_operand:DI 0 "register_operand" "= r")
921 (minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
922 (match_operand:DI 2 "register_operand" " r")))]
925 [(set_attr "type" "arith")
926 (set_attr "mode" "DI")])
928 (define_insn "*subsi3"
929 [(set (match_operand:SI 0 "register_operand" "= r")
930 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
931 (match_operand:SI 2 "register_operand" " r")))]
934 [(set_attr "type" "arith")
935 (set_attr "mode" "SI")])
937 (define_expand "subsi3"
938 [(set (match_operand:SI 0 "register_operand" "= r")
939 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
940 (match_operand:SI 2 "register_operand" " r")))]
945 rtx t = gen_reg_rtx (DImode);
946 emit_insn (gen_subsi3_extended (t, operands[1], operands[2]));
947 t = gen_lowpart (SImode, t);
948 SUBREG_PROMOTED_VAR_P (t) = 1;
949 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
950 emit_move_insn (operands[0], t);
955 (define_expand "subv<mode>4"
956 [(set (match_operand:GPR 0 "register_operand" "= r")
957 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
958 (match_operand:GPR 2 "register_operand" " r")))
959 (label_ref (match_operand 3 "" ""))]
962 if (TARGET_64BIT && <MODE>mode == SImode)
964 rtx t3 = gen_reg_rtx (DImode);
965 rtx t4 = gen_reg_rtx (DImode);
966 rtx t5 = gen_reg_rtx (DImode);
967 rtx t6 = gen_reg_rtx (DImode);
969 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
970 if (GET_CODE (operands[1]) != CONST_INT)
971 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
974 if (GET_CODE (operands[2]) != CONST_INT)
975 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
978 emit_insn (gen_subdi3 (t3, t4, t5));
979 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
981 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
985 rtx t3 = gen_reg_rtx (<MODE>mode);
986 rtx t4 = gen_reg_rtx (<MODE>mode);
988 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
990 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
991 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
993 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[1], operands[0]);
994 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[1], operands[0]));
996 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
1002 (define_expand "usubv<mode>4"
1003 [(set (match_operand:GPR 0 "register_operand" "= r")
1004 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
1005 (match_operand:GPR 2 "register_operand" " r")))
1006 (label_ref (match_operand 3 "" ""))]
1009 if (TARGET_64BIT && <MODE>mode == SImode)
1011 rtx t3 = gen_reg_rtx (DImode);
1012 rtx t4 = gen_reg_rtx (DImode);
1014 if (GET_CODE (operands[1]) != CONST_INT)
1015 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
1018 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
1019 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
1021 riscv_expand_conditional_branch (operands[3], LTU, t3, t4);
1025 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
1026 riscv_expand_conditional_branch (operands[3], LTU, operands[1],
1034 (define_insn "subsi3_extended"
1035 [(set (match_operand:DI 0 "register_operand" "= r")
1037 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
1038 (match_operand:SI 2 "register_operand" " r"))))]
1041 [(set_attr "type" "arith")
1042 (set_attr "mode" "SI")])
1044 (define_insn "*subsi3_extended2"
1045 [(set (match_operand:DI 0 "register_operand" "= r")
1047 (match_operator:SI 3 "subreg_lowpart_operator"
1048 [(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
1049 (match_operand:DI 2 "register_operand" " r"))])))]
1052 [(set_attr "type" "arith")
1053 (set_attr "mode" "SI")])
1055 (define_insn "negdi2"
1056 [(set (match_operand:DI 0 "register_operand" "=r")
1057 (neg:DI (match_operand:DI 1 "register_operand" " r")))]
1060 [(set_attr "type" "arith")
1061 (set_attr "mode" "DI")])
1063 (define_insn "*negsi2"
1064 [(set (match_operand:SI 0 "register_operand" "=r")
1065 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
1068 [(set_attr "type" "arith")
1069 (set_attr "mode" "SI")])
1071 (define_expand "negsi2"
1072 [(set (match_operand:SI 0 "register_operand" "=r")
1073 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
1078 rtx t = gen_reg_rtx (DImode);
1079 emit_insn (gen_negsi2_extended (t, operands[1]));
1080 t = gen_lowpart (SImode, t);
1081 SUBREG_PROMOTED_VAR_P (t) = 1;
1082 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1083 emit_move_insn (operands[0], t);
1088 (define_insn "negsi2_extended"
1089 [(set (match_operand:DI 0 "register_operand" "=r")
1091 (neg:SI (match_operand:SI 1 "register_operand" " r"))))]
1094 [(set_attr "type" "arith")
1095 (set_attr "mode" "SI")])
1097 (define_insn "*negsi2_extended2"
1098 [(set (match_operand:DI 0 "register_operand" "=r")
1100 (match_operator:SI 2 "subreg_lowpart_operator"
1101 [(neg:DI (match_operand:DI 1 "register_operand" " r"))])))]
1104 [(set_attr "type" "arith")
1105 (set_attr "mode" "SI")])
1108 ;; ....................
1112 ;; ....................
;; Floating-point multiplication: operands[0] = operands[1] * operands[2],
;; emitted as fmul.<fmt>.  Same enabling condition as the other basic FP
;; arithmetic patterns: hard-float F/D-style registers or Zfinx.
1115 (define_insn "mul<mode>3"
1116 [(set (match_operand:ANYF 0 "register_operand" "=f")
1117 (mult:ANYF (match_operand:ANYF 1 "register_operand" " f")
1118 (match_operand:ANYF 2 "register_operand" " f")))]
1119 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1120 "fmul.<fmt>\t%0,%1,%2"
1121 [(set_attr "type" "fmul")
1122 (set_attr "mode" "<UNITMODE>")])
1124 (define_insn "*mulsi3"
1125 [(set (match_operand:SI 0 "register_operand" "=r")
1126 (mult:SI (match_operand:SI 1 "register_operand" " r")
1127 (match_operand:SI 2 "register_operand" " r")))]
1128 "TARGET_ZMMUL || TARGET_MUL"
1130 [(set_attr "type" "imul")
1131 (set_attr "mode" "SI")])
1133 (define_expand "mulsi3"
1134 [(set (match_operand:SI 0 "register_operand" "=r")
1135 (mult:SI (match_operand:SI 1 "register_operand" " r")
1136 (match_operand:SI 2 "register_operand" " r")))]
1137 "TARGET_ZMMUL || TARGET_MUL"
1141 rtx t = gen_reg_rtx (DImode);
1142 emit_insn (gen_mulsi3_extended (t, operands[1], operands[2]));
1143 t = gen_lowpart (SImode, t);
1144 SUBREG_PROMOTED_VAR_P (t) = 1;
1145 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1146 emit_move_insn (operands[0], t);
1151 (define_insn "muldi3"
1152 [(set (match_operand:DI 0 "register_operand" "=r")
1153 (mult:DI (match_operand:DI 1 "register_operand" " r")
1154 (match_operand:DI 2 "register_operand" " r")))]
1155 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1157 [(set_attr "type" "imul")
1158 (set_attr "mode" "DI")])
1160 (define_expand "mulv<mode>4"
1161 [(set (match_operand:GPR 0 "register_operand" "=r")
1162 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1163 (match_operand:GPR 2 "register_operand" " r")))
1164 (label_ref (match_operand 3 "" ""))]
1165 "TARGET_ZMMUL || TARGET_MUL"
1167 if (TARGET_64BIT && <MODE>mode == SImode)
1169 rtx t3 = gen_reg_rtx (DImode);
1170 rtx t4 = gen_reg_rtx (DImode);
1171 rtx t5 = gen_reg_rtx (DImode);
1172 rtx t6 = gen_reg_rtx (DImode);
1174 if (GET_CODE (operands[1]) != CONST_INT)
1175 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
1178 if (GET_CODE (operands[2]) != CONST_INT)
1179 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
1182 emit_insn (gen_muldi3 (t3, t4, t5));
1184 emit_move_insn (operands[0], gen_lowpart (SImode, t3));
1185 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
1187 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
1191 rtx hp = gen_reg_rtx (<MODE>mode);
1192 rtx lp = gen_reg_rtx (<MODE>mode);
1194 emit_insn (gen_smul<mode>3_highpart (hp, operands[1], operands[2]));
1195 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1196 riscv_emit_binary (ASHIFTRT, lp, operands[0],
1197 GEN_INT (BITS_PER_WORD - 1));
1199 riscv_expand_conditional_branch (operands[3], NE, hp, lp);
1205 (define_expand "umulv<mode>4"
1206 [(set (match_operand:GPR 0 "register_operand" "=r")
1207 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1208 (match_operand:GPR 2 "register_operand" " r")))
1209 (label_ref (match_operand 3 "" ""))]
1210 "TARGET_ZMMUL || TARGET_MUL"
1212 if (TARGET_64BIT && <MODE>mode == SImode)
1214 rtx t3 = gen_reg_rtx (DImode);
1215 rtx t4 = gen_reg_rtx (DImode);
1216 rtx t5 = gen_reg_rtx (DImode);
1217 rtx t6 = gen_reg_rtx (DImode);
1218 rtx t7 = gen_reg_rtx (DImode);
1219 rtx t8 = gen_reg_rtx (DImode);
1221 if (GET_CODE (operands[1]) != CONST_INT)
1222 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
1225 if (GET_CODE (operands[2]) != CONST_INT)
1226 emit_insn (gen_extend_insn (t4, operands[2], DImode, SImode, 0));
1230 emit_insn (gen_ashldi3 (t5, t3, GEN_INT (32)));
1231 emit_insn (gen_ashldi3 (t6, t4, GEN_INT (32)));
1232 emit_insn (gen_umuldi3_highpart (t7, t5, t6));
1233 emit_move_insn (operands[0], gen_lowpart (SImode, t7));
1234 emit_insn (gen_lshrdi3 (t8, t7, GEN_INT (32)));
1236 riscv_expand_conditional_branch (operands[3], NE, t8, const0_rtx);
1240 rtx hp = gen_reg_rtx (<MODE>mode);
1242 emit_insn (gen_umul<mode>3_highpart (hp, operands[1], operands[2]));
1243 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1245 riscv_expand_conditional_branch (operands[3], NE, hp, const0_rtx);
1251 (define_insn "mulsi3_extended"
1252 [(set (match_operand:DI 0 "register_operand" "=r")
1254 (mult:SI (match_operand:SI 1 "register_operand" " r")
1255 (match_operand:SI 2 "register_operand" " r"))))]
1256 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1258 [(set_attr "type" "imul")
1259 (set_attr "mode" "SI")])
1261 (define_insn "*mulsi3_extended2"
1262 [(set (match_operand:DI 0 "register_operand" "=r")
1264 (match_operator:SI 3 "subreg_lowpart_operator"
1265 [(mult:DI (match_operand:DI 1 "register_operand" " r")
1266 (match_operand:DI 2 "register_operand" " r"))])))]
1267 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1269 [(set_attr "type" "imul")
1270 (set_attr "mode" "SI")])
1273 ;; ........................
1275 ;; MULTIPLICATION HIGH-PART
1277 ;; ........................
1281 (define_expand "<u>mulditi3"
1282 [(set (match_operand:TI 0 "register_operand")
1283 (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
1284 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
1285 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1287 rtx low = gen_reg_rtx (DImode);
1288 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1290 rtx high = gen_reg_rtx (DImode);
1291 emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
1293 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1294 emit_move_insn (gen_highpart (DImode, operands[0]), high);
1298 (define_insn "<su>muldi3_highpart"
1299 [(set (match_operand:DI 0 "register_operand" "=r")
1302 (mult:TI (any_extend:TI
1303 (match_operand:DI 1 "register_operand" " r"))
1305 (match_operand:DI 2 "register_operand" " r")))
1307 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1309 [(set_attr "type" "imul")
1310 (set_attr "mode" "DI")])
1312 (define_expand "usmulditi3"
1313 [(set (match_operand:TI 0 "register_operand")
1314 (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
1315 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
1316 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1318 rtx low = gen_reg_rtx (DImode);
1319 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1321 rtx high = gen_reg_rtx (DImode);
1322 emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
1324 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1325 emit_move_insn (gen_highpart (DImode, operands[0]), high);
1329 (define_insn "usmuldi3_highpart"
1330 [(set (match_operand:DI 0 "register_operand" "=r")
1333 (mult:TI (zero_extend:TI
1334 (match_operand:DI 1 "register_operand" "r"))
1336 (match_operand:DI 2 "register_operand" " r")))
1338 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1340 [(set_attr "type" "imul")
1341 (set_attr "mode" "DI")])
1343 (define_expand "<u>mulsidi3"
1344 [(set (match_operand:DI 0 "register_operand" "=r")
1345 (mult:DI (any_extend:DI
1346 (match_operand:SI 1 "register_operand" " r"))
1348 (match_operand:SI 2 "register_operand" " r"))))]
1349 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1351 rtx temp = gen_reg_rtx (SImode);
1352 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1353 emit_insn (gen_<su>mulsi3_highpart (riscv_subword (operands[0], true),
1354 operands[1], operands[2]));
1355 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
1359 (define_insn "<su>mulsi3_highpart"
1360 [(set (match_operand:SI 0 "register_operand" "=r")
1363 (mult:DI (any_extend:DI
1364 (match_operand:SI 1 "register_operand" " r"))
1366 (match_operand:SI 2 "register_operand" " r")))
1368 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1370 [(set_attr "type" "imul")
1371 (set_attr "mode" "SI")])
1374 (define_expand "usmulsidi3"
1375 [(set (match_operand:DI 0 "register_operand" "=r")
1376 (mult:DI (zero_extend:DI
1377 (match_operand:SI 1 "register_operand" " r"))
1379 (match_operand:SI 2 "register_operand" " r"))))]
1380 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1382 rtx temp = gen_reg_rtx (SImode);
1383 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1384 emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
1385 operands[1], operands[2]));
1386 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
1390 (define_insn "usmulsi3_highpart"
1391 [(set (match_operand:SI 0 "register_operand" "=r")
1394 (mult:DI (zero_extend:DI
1395 (match_operand:SI 1 "register_operand" " r"))
1397 (match_operand:SI 2 "register_operand" " r")))
1399 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1401 [(set_attr "type" "imul")
1402 (set_attr "mode" "SI")])
1405 ;; ....................
1407 ;; DIVISION and REMAINDER
1409 ;; ....................
1412 (define_insn "*<optab>si3"
1413 [(set (match_operand:SI 0 "register_operand" "=r")
1414 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1415 (match_operand:SI 2 "register_operand" " r")))]
1417 "<insn>%i2%~\t%0,%1,%2"
1418 [(set_attr "type" "idiv")
1419 (set_attr "mode" "SI")])
1421 (define_expand "<optab>si3"
1422 [(set (match_operand:SI 0 "register_operand" "=r")
1423 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1424 (match_operand:SI 2 "register_operand" " r")))]
1429 rtx t = gen_reg_rtx (DImode);
1430 emit_insn (gen_<optab>si3_extended (t, operands[1], operands[2]));
1431 t = gen_lowpart (SImode, t);
1432 SUBREG_PROMOTED_VAR_P (t) = 1;
1433 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1434 emit_move_insn (operands[0], t);
;; DImode integer division/remainder (any_div iterates div/udiv/mod/umod;
;; <insn> expands to the matching mnemonic).  RV64-only, and requires the
;; divide instructions (TARGET_DIV, i.e. the M-extension divider).
;; %i2 prints an 'i' suffix when operand 2 is an immediate.
1439 (define_insn "<optab>di3"
1440 [(set (match_operand:DI 0 "register_operand" "=r")
1441 (any_div:DI (match_operand:DI 1 "register_operand" " r")
1442 (match_operand:DI 2 "register_operand" " r")))]
1443 "TARGET_DIV && TARGET_64BIT"
1444 "<insn>%i2\t%0,%1,%2"
1445 [(set_attr "type" "idiv")
1446 (set_attr "mode" "DI")])
1448 (define_expand "<u>divmod<mode>4"
1450 [(set (match_operand:GPR 0 "register_operand")
1451 (only_div:GPR (match_operand:GPR 1 "register_operand")
1452 (match_operand:GPR 2 "register_operand")))
1453 (set (match_operand:GPR 3 "register_operand")
1454 (<paired_mod>:GPR (match_dup 1) (match_dup 2)))])]
1455 "TARGET_DIV && riscv_use_divmod_expander ()"
1457 rtx tmp = gen_reg_rtx (<MODE>mode);
1458 emit_insn (gen_<u>div<GPR:mode>3 (operands[0], operands[1], operands[2]));
1459 emit_insn (gen_mul<GPR:mode>3 (tmp, operands[0], operands[2]));
1460 emit_insn (gen_sub<GPR:mode>3 (operands[3], operands[1], tmp));
1464 (define_insn "<optab>si3_extended"
1465 [(set (match_operand:DI 0 "register_operand" "=r")
1467 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1468 (match_operand:SI 2 "register_operand" " r"))))]
1469 "TARGET_DIV && TARGET_64BIT"
1470 "<insn>%i2w\t%0,%1,%2"
1471 [(set_attr "type" "idiv")
1472 (set_attr "mode" "DI")])
;; Floating-point division: operands[0] = operands[1] / operands[2].
;; Needs both an FP implementation (hard-float or Zfinx) and the FP
;; divide/sqrt unit (TARGET_FDIV), which can be disabled separately.
1474 (define_insn "div<mode>3"
1475 [(set (match_operand:ANYF 0 "register_operand" "=f")
1476 (div:ANYF (match_operand:ANYF 1 "register_operand" " f")
1477 (match_operand:ANYF 2 "register_operand" " f")))]
1478 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1479 "fdiv.<fmt>\t%0,%1,%2"
1480 [(set_attr "type" "fdiv")
1481 (set_attr "mode" "<UNITMODE>")])
1484 ;; ....................
1488 ;; ....................
1490 (define_insn "sqrt<mode>2"
1491 [(set (match_operand:ANYF 0 "register_operand" "=f")
1492 (sqrt:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1493 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1495 return "fsqrt.<fmt>\t%0,%1";
1497 [(set_attr "type" "fsqrt")
1498 (set_attr "mode" "<UNITMODE>")])
1500 ;; Floating point multiply accumulate instructions.
;; Fused multiply-add: operands[0] = operands[1] * operands[2] + operands[3],
;; computed with a single rounding via fmadd.<fmt>.
1503 (define_insn "fma<mode>4"
1504 [(set (match_operand:ANYF 0 "register_operand" "=f")
1505 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1506 (match_operand:ANYF 2 "register_operand" " f")
1507 (match_operand:ANYF 3 "register_operand" " f")))]
1508 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1509 "fmadd.<fmt>\t%0,%1,%2,%3"
1510 [(set_attr "type" "fmadd")
1511 (set_attr "mode" "<UNITMODE>")])
;; Fused multiply-subtract: operands[0] = operands[1] * operands[2]
;; - operands[3] (the addend is negated in the RTL), mapped to fmsub.<fmt>.
1514 (define_insn "fms<mode>4"
1515 [(set (match_operand:ANYF 0 "register_operand" "=f")
1516 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1517 (match_operand:ANYF 2 "register_operand" " f")
1518 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1519 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1520 "fmsub.<fmt>\t%0,%1,%2,%3"
1521 [(set_attr "type" "fmadd")
1522 (set_attr "mode" "<UNITMODE>")])
1525 (define_insn "fnms<mode>4"
1526 [(set (match_operand:ANYF 0 "register_operand" "=f")
1528 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1529 (match_operand:ANYF 2 "register_operand" " f")
1530 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1531 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1532 "fnmadd.<fmt>\t%0,%1,%2,%3"
1533 [(set_attr "type" "fmadd")
1534 (set_attr "mode" "<UNITMODE>")])
1537 (define_insn "fnma<mode>4"
1538 [(set (match_operand:ANYF 0 "register_operand" "=f")
1540 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1541 (match_operand:ANYF 2 "register_operand" " f")
1542 (match_operand:ANYF 3 "register_operand" " f")))]
1543 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1544 "fnmsub.<fmt>\t%0,%1,%2,%3"
1545 [(set_attr "type" "fmadd")
1546 (set_attr "mode" "<UNITMODE>")])
1548 ;; -(-a * b - c), modulo signed zeros
1549 (define_insn "*fma<mode>4"
1550 [(set (match_operand:ANYF 0 "register_operand" "=f")
1553 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1554 (match_operand:ANYF 2 "register_operand" " f")
1555 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1556 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1557 "fmadd.<fmt>\t%0,%1,%2,%3"
1558 [(set_attr "type" "fmadd")
1559 (set_attr "mode" "<UNITMODE>")])
1561 ;; -(-a * b + c), modulo signed zeros
1562 (define_insn "*fms<mode>4"
1563 [(set (match_operand:ANYF 0 "register_operand" "=f")
1566 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1567 (match_operand:ANYF 2 "register_operand" " f")
1568 (match_operand:ANYF 3 "register_operand" " f"))))]
1569 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1570 "fmsub.<fmt>\t%0,%1,%2,%3"
1571 [(set_attr "type" "fmadd")
1572 (set_attr "mode" "<UNITMODE>")])
1574 ;; -(a * b + c), modulo signed zeros
1575 (define_insn "*fnms<mode>4"
1576 [(set (match_operand:ANYF 0 "register_operand" "=f")
1579 (match_operand:ANYF 1 "register_operand" " f")
1580 (match_operand:ANYF 2 "register_operand" " f")
1581 (match_operand:ANYF 3 "register_operand" " f"))))]
1582 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1583 "fnmadd.<fmt>\t%0,%1,%2,%3"
1584 [(set_attr "type" "fmadd")
1585 (set_attr "mode" "<UNITMODE>")])
1587 ;; -(a * b - c), modulo signed zeros
1588 (define_insn "*fnma<mode>4"
1589 [(set (match_operand:ANYF 0 "register_operand" "=f")
1592 (match_operand:ANYF 1 "register_operand" " f")
1593 (match_operand:ANYF 2 "register_operand" " f")
1594 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1595 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1596 "fnmsub.<fmt>\t%0,%1,%2,%3"
1597 [(set_attr "type" "fmadd")
1598 (set_attr "mode" "<UNITMODE>")])
1601 ;; ....................
1605 ;; ....................
1607 (define_insn "abs<mode>2"
1608 [(set (match_operand:ANYF 0 "register_operand" "=f")
1609 (abs:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1610 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1612 [(set_attr "type" "fmove")
1613 (set_attr "mode" "<UNITMODE>")])
1615 (define_insn "copysign<mode>3"
1616 [(set (match_operand:ANYF 0 "register_operand" "=f")
1617 (unspec:ANYF [(match_operand:ANYF 1 "register_operand" " f")
1618 (match_operand:ANYF 2 "register_operand" " f")]
1620 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1621 "fsgnj.<fmt>\t%0,%1,%2"
1622 [(set_attr "type" "fmove")
1623 (set_attr "mode" "<UNITMODE>")])
1625 (define_insn "neg<mode>2"
1626 [(set (match_operand:ANYF 0 "register_operand" "=f")
1627 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1628 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1630 [(set_attr "type" "fmove")
1631 (set_attr "mode" "<UNITMODE>")])
1634 ;; ....................
1638 ;; ....................
1640 (define_insn "fminm<mode>3"
1641 [(set (match_operand:ANYF 0 "register_operand" "=f")
1642 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1643 (use (match_operand:ANYF 2 "register_operand" " f"))]
1645 "TARGET_HARD_FLOAT && TARGET_ZFA"
1646 "fminm.<fmt>\t%0,%1,%2"
1647 [(set_attr "type" "fmove")
1648 (set_attr "mode" "<UNITMODE>")])
1650 (define_insn "fmaxm<mode>3"
1651 [(set (match_operand:ANYF 0 "register_operand" "=f")
1652 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1653 (use (match_operand:ANYF 2 "register_operand" " f"))]
1655 "TARGET_HARD_FLOAT && TARGET_ZFA"
1656 "fmaxm.<fmt>\t%0,%1,%2"
1657 [(set_attr "type" "fmove")
1658 (set_attr "mode" "<UNITMODE>")])
1660 (define_insn "fmin<mode>3"
1661 [(set (match_operand:ANYF 0 "register_operand" "=f")
1662 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1663 (use (match_operand:ANYF 2 "register_operand" " f"))]
1665 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1666 "fmin.<fmt>\t%0,%1,%2"
1667 [(set_attr "type" "fmove")
1668 (set_attr "mode" "<UNITMODE>")])
1670 (define_insn "fmax<mode>3"
1671 [(set (match_operand:ANYF 0 "register_operand" "=f")
1672 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1673 (use (match_operand:ANYF 2 "register_operand" " f"))]
1675 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1676 "fmax.<fmt>\t%0,%1,%2"
1677 [(set_attr "type" "fmove")
1678 (set_attr "mode" "<UNITMODE>")])
;; Signed FP minimum via fmin.<fmt>.  NOTE(review): smin RTL leaves
;; NaN/signed-zero behavior unspecified, which presumably is why this
;; form (unlike the unspec-based fmin<mode>3 above) needs no
;; !HONOR_SNANS guard — confirm against GCC internals docs.
1680 (define_insn "smin<mode>3"
1681 [(set (match_operand:ANYF 0 "register_operand" "=f")
1682 (smin:ANYF (match_operand:ANYF 1 "register_operand" " f")
1683 (match_operand:ANYF 2 "register_operand" " f")))]
1684 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1685 "fmin.<fmt>\t%0,%1,%2"
1686 [(set_attr "type" "fmove")
1687 (set_attr "mode" "<UNITMODE>")])
;; Signed FP maximum via fmax.<fmt>; mirror image of smin<mode>3 above.
1689 (define_insn "smax<mode>3"
1690 [(set (match_operand:ANYF 0 "register_operand" "=f")
1691 (smax:ANYF (match_operand:ANYF 1 "register_operand" " f")
1692 (match_operand:ANYF 2 "register_operand" " f")))]
1693 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1694 "fmax.<fmt>\t%0,%1,%2"
1695 [(set_attr "type" "fmove")
1696 (set_attr "mode" "<UNITMODE>")])
1699 ;; ....................
1703 ;; ....................
1706 ;; For RV64, we don't expose the SImode operations to the rtl expanders,
1707 ;; but SImode versions exist for combine.
1709 (define_expand "and<mode>3"
1710 [(set (match_operand:X 0 "register_operand")
1711 (and:X (match_operand:X 1 "register_operand")
1712 (match_operand:X 2 "arith_or_mode_mask_or_zbs_operand")))]
1715 /* If the second operand is a mode mask, emit an extension
1717 if (CONST_INT_P (operands[2]))
1719 enum machine_mode tmode = VOIDmode;
1720 if (UINTVAL (operands[2]) == GET_MODE_MASK (HImode))
1722 else if (UINTVAL (operands[2]) == GET_MODE_MASK (SImode))
1725 if (tmode != VOIDmode)
1727 rtx tmp = gen_lowpart (tmode, operands[1]);
1728 emit_insn (gen_extend_insn (operands[0], tmp, <MODE>mode, tmode, 1));
1734 (define_insn "*and<mode>3"
1735 [(set (match_operand:X 0 "register_operand" "=r,r")
1736 (and:X (match_operand:X 1 "register_operand" "%r,r")
1737 (match_operand:X 2 "arith_operand" " r,I")))]
1740 [(set_attr "type" "logical")
1741 (set_attr "mode" "<MODE>")])
1743 ;; When we construct constants we may want to twiddle a single bit
1744 ;; by generating an IOR. But the constant likely doesn't fit
1745 ;; arith_operand. So the generic code will reload the constant into
1746 ;; a register. Post-reload we won't have the chance to squash things
1747 ;; back into a Zbs insn.
1749 ;; So indirect through a define_expand. That allows us to have a
1750 ;; predicate that conditionally accepts single bit constants without
1751 ;; putting the details of Zbs instructions in here.
1752 (define_expand "<optab><mode>3"
1753 [(set (match_operand:X 0 "register_operand")
1754 (any_or:X (match_operand:X 1 "register_operand" "")
1755 (match_operand:X 2 "arith_or_zbs_operand" "")))]
1758 (define_insn "*<optab><mode>3"
1759 [(set (match_operand:X 0 "register_operand" "=r,r")
1760 (any_or:X (match_operand:X 1 "register_operand" "%r,r")
1761 (match_operand:X 2 "arith_operand" " r,I")))]
1763 "<insn>%i2\t%0,%1,%2"
1764 [(set_attr "type" "logical")
1765 (set_attr "mode" "<MODE>")])
1767 (define_insn "*<optab>si3_internal"
1768 [(set (match_operand:SI 0 "register_operand" "=r,r")
1769 (any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
1770 (match_operand:SI 2 "arith_operand" " r,I")))]
1772 "<insn>%i2\t%0,%1,%2"
1773 [(set_attr "type" "logical")
1774 (set_attr "mode" "SI")])
1776 (define_insn "one_cmpl<mode>2"
1777 [(set (match_operand:X 0 "register_operand" "=r")
1778 (not:X (match_operand:X 1 "register_operand" " r")))]
1781 [(set_attr "type" "logical")
1782 (set_attr "mode" "<MODE>")])
1784 (define_insn "*one_cmplsi2_internal"
1785 [(set (match_operand:SI 0 "register_operand" "=r")
1786 (not:SI (match_operand:SI 1 "register_operand" " r")))]
1789 [(set_attr "type" "logical")
1790 (set_attr "mode" "SI")])
1793 ;; ....................
1797 ;; ....................
1799 (define_insn "truncdfsf2"
1800 [(set (match_operand:SF 0 "register_operand" "=f")
1802 (match_operand:DF 1 "register_operand" " f")))]
1803 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1805 [(set_attr "type" "fcvt")
1806 (set_attr "mode" "SF")])
1808 (define_insn "truncsfhf2"
1809 [(set (match_operand:HF 0 "register_operand" "=f")
1811 (match_operand:SF 1 "register_operand" " f")))]
1812 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1814 [(set_attr "type" "fcvt")
1815 (set_attr "mode" "HF")])
1817 (define_insn "truncdfhf2"
1818 [(set (match_operand:HF 0 "register_operand" "=f")
1820 (match_operand:DF 1 "register_operand" " f")))]
1821 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1822 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1824 [(set_attr "type" "fcvt")
1825 (set_attr "mode" "HF")])
1827 (define_insn "truncsfbf2"
1828 [(set (match_operand:BF 0 "register_operand" "=f")
1830 (match_operand:SF 1 "register_operand" " f")))]
1832 "fcvt.bf16.s\t%0,%1"
1833 [(set_attr "type" "fcvt")
1834 (set_attr "mode" "BF")])
1836 ;; The conversion of HF/DF/TF to BF needs to be done with SF if there is a
1837 ;; chance to generate at least one instruction, otherwise just using
1838 ;; libfunc __trunc[h|d|t]fbf2.
1839 (define_expand "trunc<mode>bf2"
1840 [(set (match_operand:BF 0 "register_operand" "=f")
1842 (match_operand:FBF 1 "register_operand" " f")))]
1845 convert_move (operands[0],
1846 convert_modes (SFmode, <MODE>mode, operands[1], 0), 0);
1849 [(set_attr "type" "fcvt")
1850 (set_attr "mode" "BF")])
1853 ;; ....................
1857 ;; ....................
1861 (define_expand "zero_extendsidi2"
1862 [(set (match_operand:DI 0 "register_operand")
1863 (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
1866 if (SUBREG_P (operands[1]) && SUBREG_PROMOTED_VAR_P (operands[1])
1867 && SUBREG_PROMOTED_UNSIGNED_P (operands[1]))
1869 emit_insn (gen_movdi (operands[0], SUBREG_REG (operands[1])));
1874 (define_insn_and_split "*zero_extendsidi2_internal"
1875 [(set (match_operand:DI 0 "register_operand" "=r,r")
1877 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1878 "TARGET_64BIT && !TARGET_ZBA && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX
1879 && !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
1883 "&& reload_completed
1884 && REG_P (operands[1])
1885 && !paradoxical_subreg_p (operands[0])"
1887 (ashift:DI (match_dup 1) (const_int 32)))
1889 (lshiftrt:DI (match_dup 0) (const_int 32)))]
1890 { operands[1] = gen_lowpart (DImode, operands[1]); }
1891 [(set_attr "move_type" "shift_shift,load")
1892 (set_attr "type" "load")
1893 (set_attr "mode" "DI")])
1895 (define_expand "zero_extendhi<GPR:mode>2"
1896 [(set (match_operand:GPR 0 "register_operand")
1898 (match_operand:HI 1 "nonimmediate_operand")))]
1901 (define_insn_and_split "*zero_extendhi<GPR:mode>2"
1902 [(set (match_operand:GPR 0 "register_operand" "=r,r")
1904 (match_operand:HI 1 "nonimmediate_operand" " r,m")))]
1905 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1909 "&& reload_completed
1910 && REG_P (operands[1])
1911 && !paradoxical_subreg_p (operands[0])"
1913 (ashift:GPR (match_dup 1) (match_dup 2)))
1915 (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
1917 operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
1918 operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
1920 [(set_attr "move_type" "shift_shift,load")
1921 (set_attr "type" "load")
1922 (set_attr "mode" "<GPR:MODE>")])
1924 (define_expand "zero_extendqi<SUPERQI:mode>2"
1925 [(set (match_operand:SUPERQI 0 "register_operand")
1926 (zero_extend:SUPERQI
1927 (match_operand:QI 1 "nonimmediate_operand")))]
1930 (define_insn "*zero_extendqi<SUPERQI:mode>2_internal"
1931 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1932 (zero_extend:SUPERQI
1933 (match_operand:QI 1 "nonimmediate_operand" " r,m")))]
1934 "!TARGET_XTHEADMEMIDX"
1938 [(set_attr "move_type" "andi,load")
1939 (set_attr "type" "arith,load")
1940 (set_attr "mode" "<SUPERQI:MODE>")])
1943 ;; ....................
1947 ;; ....................
1949 (define_expand "extendsidi2"
1950 [(set (match_operand:DI 0 "register_operand" "=r,r")
1952 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1955 if (SUBREG_P (operands[1]) && SUBREG_PROMOTED_VAR_P (operands[1])
1956 && SUBREG_PROMOTED_SIGNED_P (operands[1]))
1958 emit_insn (gen_movdi (operands[0], SUBREG_REG (operands[1])));
1963 (define_insn "*extendsidi2_internal"
1964 [(set (match_operand:DI 0 "register_operand" "=r,r")
1966 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1967 "TARGET_64BIT && !TARGET_XTHEADMEMIDX"
1971 [(set_attr "move_type" "move,load")
1972 (set_attr "type" "move,load")
1973 (set_attr "mode" "DI")])
1975 (define_expand "extend<SHORT:mode><SUPERQI:mode>2"
1976 [(set (match_operand:SUPERQI 0 "register_operand")
1977 (sign_extend:SUPERQI (match_operand:SHORT 1 "nonimmediate_operand")))]
1980 (define_insn_and_split "*extend<SHORT:mode><SUPERQI:mode>2"
1981 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1982 (sign_extend:SUPERQI
1983 (match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
1984 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1987 l<SHORT:size>\t%0,%1"
1988 "&& reload_completed
1989 && REG_P (operands[1])
1990 && !paradoxical_subreg_p (operands[0])"
1991 [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1992 (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
1994 operands[0] = gen_lowpart (SImode, operands[0]);
1995 operands[1] = gen_lowpart (SImode, operands[1]);
1996 operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
1997 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
1999 [(set_attr "move_type" "shift_shift,load")
2000 (set_attr "type" "load")
2001 (set_attr "mode" "SI")])
2003 (define_insn "extendhfsf2"
2004 [(set (match_operand:SF 0 "register_operand" "=f")
2006 (match_operand:HF 1 "register_operand" " f")))]
2007 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
2009 [(set_attr "type" "fcvt")
2010 (set_attr "mode" "SF")])
2012 (define_insn "extendbfsf2"
2013 [(set (match_operand:SF 0 "register_operand" "=f")
2015 (match_operand:BF 1 "register_operand" " f")))]
2017 "fcvt.s.bf16\t%0,%1"
2018 [(set_attr "type" "fcvt")
2019 (set_attr "mode" "SF")])
2021 (define_insn "extendsfdf2"
2022 [(set (match_operand:DF 0 "register_operand" "=f")
2024 (match_operand:SF 1 "register_operand" " f")))]
2025 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
2027 [(set_attr "type" "fcvt")
2028 (set_attr "mode" "DF")])
2030 (define_insn "extendhfdf2"
2031 [(set (match_operand:DF 0 "register_operand" "=f")
2033 (match_operand:HF 1 "register_operand" " f")))]
2034 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
2035 (TARGET_ZHINXMIN && TARGET_ZDINX)"
2037 [(set_attr "type" "fcvt")
2038 (set_attr "mode" "DF")])
2040 ;; 16-bit floating point moves
2041 (define_expand "mov<mode>"
2042 [(set (match_operand:HFBF 0 "")
2043 (match_operand:HFBF 1 ""))]
2046 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; 16-bit FP (HFmode/BFmode) moves when the matching minimal FP-move
;; extension is present (Zfhmin for HF, Zfbfmin for BF).  Alternatives
;; cover FPR<->FPR, FP load/store, GPR<->FPR transfers, and GPR/memory
;; moves; riscv_output_move selects the actual instruction.  The
;; condition insists one side is a register (or operand 1 is zero) so
;; mem-to-mem moves are rejected.
2050 (define_insn "*mov<mode>_hardfloat"
2051 [(set (match_operand:HFBF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2052 (match_operand:HFBF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2053 "((TARGET_ZFHMIN && <MODE>mode == HFmode)
2054 || (TARGET_ZFBFMIN && <MODE>mode == BFmode))
2055 && (register_operand (operands[0], <MODE>mode)
2056 || reg_or_0_operand (operands[1], <MODE>mode))"
2057 { return riscv_output_move (operands[0], operands[1]); }
2058 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2059 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2060 (set_attr "mode" "<MODE>")])
2062 (define_insn "*mov<mode>_softfloat"
2063 [(set (match_operand:HFBF 0 "nonimmediate_operand" "=f, r,r,m,*f,*r")
2064 (match_operand:HFBF 1 "move_operand" " f,Gr,m,r,*r,*f"))]
2065 "((!TARGET_ZFHMIN && <MODE>mode == HFmode) || (<MODE>mode == BFmode))
2066 && (register_operand (operands[0], <MODE>mode)
2067 || reg_or_0_operand (operands[1], <MODE>mode))"
2068 { return riscv_output_move (operands[0], operands[1]); }
2069 [(set_attr "move_type" "fmove,move,load,store,mtc,mfc")
2070 (set_attr "type" "fmove,move,load,store,mtc,mfc")
2071 (set_attr "mode" "<MODE>")])
2073 (define_insn "*mov<HFBF:mode>_softfloat_boxing"
2074 [(set (match_operand:HFBF 0 "register_operand" "=f")
2075 (unspec:HFBF [(match_operand:X 1 "register_operand" " r")]
2076 UNSPEC_FMV_FP16_X))]
2079 [(set_attr "type" "fmove")
2080 (set_attr "mode" "SF")])
2083 ;; ....................
2087 ;; ....................
;; FP -> SI conversion.  On RV64 the expander computes into a DImode
;; temporary via the _sext pattern and marks the SImode lowpart as a
;; sign-extended promoted subreg, avoiding a redundant extension.
2089 (define_expand "<fix_uns>_trunc<ANYF:mode>si2"
2090 [(set (match_operand:SI 0 "register_operand" "=r")
2092 (match_operand:ANYF 1 "register_operand" " f")))]
2093 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2097 rtx t = gen_reg_rtx (DImode);
2098 emit_insn (gen_<fix_uns>_trunc<ANYF:mode>si2_sext (t, operands[1]));
2099 t = gen_lowpart (SImode, t);
2100 SUBREG_PROMOTED_VAR_P (t) = 1;
2101 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2102 emit_move_insn (operands[0], t);
;; FP -> SI conversion instruction, truncating toward zero (rtz).
2107 (define_insn "*<fix_uns>_trunc<ANYF:mode>si2"
2108 [(set (match_operand:SI 0 "register_operand" "=r")
2110 (match_operand:ANYF 1 "register_operand" " f")))]
2111 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2112 "fcvt.w<u>.<ANYF:fmt> %0,%1,rtz"
2113 [(set_attr "type" "fcvt_f2i")
2114 (set_attr "mode" "<ANYF:MODE>")])
;; FP -> SI with the DImode result known sign-extended (RV64 only);
;; fcvt.w* naturally sign-extends its 32-bit result.
2116 (define_insn "<fix_uns>_trunc<ANYF:mode>si2_sext"
2117 [(set (match_operand:DI 0 "register_operand" "=r")
2118 (sign_extend:DI (fix_ops:SI
2119 (match_operand:ANYF 1 "register_operand" " f"))))]
2120 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2121 "fcvt.w<u>.<ANYF:fmt> %0,%1,rtz"
2122 [(set_attr "type" "fcvt_f2i")
2123 (set_attr "mode" "<ANYF:MODE>")])
;; FP -> DI conversion, truncating toward zero (RV64 only).
2125 (define_insn "<fix_uns>_trunc<ANYF:mode>di2"
2126 [(set (match_operand:DI 0 "register_operand" "=r")
2128 (match_operand:ANYF 1 "register_operand" " f")))]
2129 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2130 "fcvt.l<u>.<ANYF:fmt> %0,%1,rtz"
2131 [(set_attr "type" "fcvt_f2i")
2132 (set_attr "mode" "<ANYF:MODE>")])
;; Signed integer -> FP conversion; %z1 prints x0 for a zero operand.
2134 (define_insn "float<GPR:mode><ANYF:mode>2"
2135 [(set (match_operand:ANYF 0 "register_operand" "= f")
2137 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
2138 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2139 "fcvt.<ANYF:fmt>.<GPR:ifmt>\t%0,%z1"
2140 [(set_attr "type" "fcvt_i2f")
2141 (set_attr "mode" "<ANYF:MODE>")])
;; Unsigned integer -> FP conversion.
2143 (define_insn "floatuns<GPR:mode><ANYF:mode>2"
2144 [(set (match_operand:ANYF 0 "register_operand" "= f")
2145 (unsigned_float:ANYF
2146 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
2147 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2148 "fcvt.<ANYF:fmt>.<GPR:ifmt>u\t%0,%z1"
2149 [(set_attr "type" "fcvt_i2f")
2150 (set_attr "mode" "<ANYF:MODE>")])
;; lrint: FP -> SI using the dynamic rounding mode.  On RV64 go via a
;; DImode temporary (same promoted-subreg idiom as the trunc expander).
2152 (define_expand "lrint<ANYF:mode>si2"
2153 [(set (match_operand:SI 0 "register_operand" "=r")
2155 [(match_operand:ANYF 1 "register_operand" " f")]
2157 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2161 rtx t = gen_reg_rtx (DImode);
2162 emit_insn (gen_lrint<ANYF:mode>si2_sext (t, operands[1]));
2163 t = gen_lowpart (SImode, t);
2164 SUBREG_PROMOTED_VAR_P (t) = 1;
2165 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2166 emit_move_insn (operands[0], t);
;; lrint to SI with the dynamic rounding mode (dyn).
2171 (define_insn "*lrint<ANYF:mode>si2"
2172 [(set (match_operand:SI 0 "register_operand" "=r")
2174 [(match_operand:ANYF 1 "register_operand" " f")]
2176 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2177 "fcvt.w.<ANYF:fmt> %0,%1,dyn"
2178 [(set_attr "type" "fcvt_f2i")
2179 (set_attr "mode" "<ANYF:MODE>")])
;; lrint to SI with a known sign-extended DImode result (RV64 only).
2181 (define_insn "lrint<ANYF:mode>si2_sext"
2182 [(set (match_operand:DI 0 "register_operand" "=r")
2183 (sign_extend:DI (unspec:SI
2184 [(match_operand:ANYF 1 "register_operand" " f")]
2186 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2187 "fcvt.w.<ANYF:fmt> %0,%1,dyn"
2188 [(set_attr "type" "fcvt_f2i")
2189 (set_attr "mode" "<ANYF:MODE>")])
;; lrint to DI (RV64 only).
2191 (define_insn "lrint<ANYF:mode>di2"
2192 [(set (match_operand:DI 0 "register_operand" "=r")
2194 [(match_operand:ANYF 1 "register_operand" " f")]
2196 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2197 "fcvt.l.<ANYF:fmt> %0,%1,dyn"
2198 [(set_attr "type" "fcvt_f2i")
2199 (set_attr "mode" "<ANYF:MODE>")])
;; l<round> (e.g. lceil/lfloor): FP -> SI with a static rounding mode
;; (<round_rm>); same RV64 sign-extended-temporary idiom as above.
2201 (define_expand "l<round_pattern><ANYF:mode>si2"
2202 [(set (match_operand:SI 0 "register_operand" "=r")
2204 [(match_operand:ANYF 1 "register_operand" " f")]
2206 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2210 rtx t = gen_reg_rtx (DImode);
2211 emit_insn (gen_l<round_pattern><ANYF:mode>si2_sext (t, operands[1]));
2212 t = gen_lowpart (SImode, t);
2213 SUBREG_PROMOTED_VAR_P (t) = 1;
2214 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2215 emit_move_insn (operands[0], t);
;; l<round> to SI.
2220 (define_insn "*l<round_pattern><ANYF:mode>si2"
2221 [(set (match_operand:SI 0 "register_operand" "=r")
2223 [(match_operand:ANYF 1 "register_operand" " f")]
2225 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2226 "fcvt.w.<ANYF:fmt> %0,%1,<round_rm>"
2227 [(set_attr "type" "fcvt_f2i")
2228 (set_attr "mode" "<ANYF:MODE>")])
;; l<round> to SI with a known sign-extended DImode result (RV64 only).
2230 (define_insn "l<round_pattern><ANYF:mode>si2_sext"
2231 [(set (match_operand:DI 0 "register_operand" "=r")
2232 (sign_extend:DI (unspec:SI
2233 [(match_operand:ANYF 1 "register_operand" " f")]
2235 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2236 "fcvt.w.<ANYF:fmt> %0,%1,<round_rm>"
2237 [(set_attr "type" "fcvt_f2i")
2238 (set_attr "mode" "<ANYF:MODE>")])
;; l<round> to DI (RV64 only).
2240 (define_insn "l<round_pattern><ANYF:mode>di2"
2241 [(set (match_operand:DI 0 "register_operand" "=r")
2243 [(match_operand:ANYF 1 "register_operand" " f")]
2245 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2246 "fcvt.l.<ANYF:fmt> %0,%1,<round_rm>"
2247 [(set_attr "type" "fcvt_f2i")
2248 (set_attr "mode" "<ANYF:MODE>")])
2250 ;; There are a couple non-obvious restrictions to be aware of.
2252 ;; We'll do a FP-INT conversion in the sequence. But we don't
2253 ;; have a .l (64bit) variant of those instructions for rv32.
2254 ;; To preserve proper semantics we must reject DFmode inputs
2255 ;; for rv32 unless Zfa is enabled.
2257 ;; The ANYF iterator allows HFmode. We don't have all the
2258 necessary patterns defined for HFmode. So restrict HFmode to TARGET_ZFA.
;; Round-to-integral kept in FP format.  With Zfa this is a single
;; fround; otherwise convert FP->INT->FP, guarded by a magnitude test
;; since large values are already integral.
2260 (define_expand "<round_pattern><ANYF:mode>2"
2261 [(set (match_operand:ANYF 0 "register_operand" "=f")
2263 [(match_operand:ANYF 1 "register_operand" " f")]
2266 && (TARGET_ZFA || flag_fp_int_builtin_inexact || !flag_trapping_math)
2267 && (TARGET_ZFA || TARGET_64BIT || <ANYF:MODE>mode != DFmode)
2268 && (TARGET_ZFA || <ANYF:MODE>mode != HFmode))"
2271 emit_insn (gen_<round_pattern><ANYF:mode>_zfa2 (operands[0],
2276 rtx label = gen_label_rtx ();
2277 rtx end_label = gen_label_rtx ();
2278 rtx abs_reg = gen_reg_rtx (<ANYF:MODE>mode);
2279 rtx coeff_reg = gen_reg_rtx (<ANYF:MODE>mode);
2280 rtx tmp_reg = gen_reg_rtx (<ANYF:MODE>mode);
2282 riscv_emit_move (tmp_reg, operands[1]);
2283 riscv_emit_move (coeff_reg,
2284 riscv_vector::get_fp_rounding_coefficient (<ANYF:MODE>mode));
2285 emit_insn (gen_abs<ANYF:mode>2 (abs_reg, operands[1]));
;; Only take the FP->INT->FP round trip when |x| < coefficient.
2287 riscv_expand_conditional_branch (label, LT, abs_reg, coeff_reg);
2289 emit_jump_insn (gen_jump (end_label));
2293 switch (<ANYF:MODE>mode)
2296 reg = gen_reg_rtx (SImode);
2297 emit_insn (gen_l<round_pattern>sfsi2 (reg, operands[1]));
2298 emit_insn (gen_floatsisf2 (abs_reg, reg));
2301 reg = gen_reg_rtx (DImode);
2302 emit_insn (gen_l<round_pattern>dfdi2 (reg, operands[1]));
2303 emit_insn (gen_floatdidf2 (abs_reg, reg));
;; Restore the sign of the input (handles negative values and -0.0).
2309 emit_insn (gen_copysign<ANYF:mode>3 (tmp_reg, abs_reg, operands[1]));
2311 emit_label (end_label);
2312 riscv_emit_move (operands[0], tmp_reg);
;; Zfa single-instruction rounding with a static rounding mode.
2318 (define_insn "<round_pattern><ANYF:mode>_zfa2"
2319 [(set (match_operand:ANYF 0 "register_operand" "=f")
2321 [(match_operand:ANYF 1 "register_operand" " f")]
2323 "TARGET_HARD_FLOAT && TARGET_ZFA"
2324 "fround.<ANYF:fmt>\t%0,%1,<round_rm>"
2325 [(set_attr "type" "fcvt")
2326 (set_attr "mode" "<ANYF:MODE>")])
;; Zfa rint: round to integral in the dynamic rounding mode,
;; signalling inexact (froundnx).
2328 (define_insn "rint<ANYF:mode>2"
2329 [(set (match_operand:ANYF 0 "register_operand" "=f")
2331 [(match_operand:ANYF 1 "register_operand" " f")]
2333 "TARGET_HARD_FLOAT && TARGET_ZFA"
2334 "froundnx.<ANYF:fmt>\t%0,%1"
2335 [(set_attr "type" "fcvt")
2336 (set_attr "mode" "<ANYF:MODE>")])
2339 ;; ....................
2343 ;; ....................
2345 ;; Lower-level instructions for loading an address from the GOT.
2346 ;; We could use MEMs, but an unspec gives more optimization
;; GOT load of a symbol's address.
2349 (define_insn "got_load<mode>"
2350 [(set (match_operand:P 0 "register_operand" "=r")
2352 [(match_operand:P 1 "symbolic_operand" "")]
2356 [(set_attr "got" "load")
2357 (set_attr "type" "load")
2358 (set_attr "mode" "<MODE>")])
;; TLS local-exec: add thread pointer with the %tprel_add relocation.
2360 (define_insn "tls_add_tp_le<mode>"
2361 [(set (match_operand:P 0 "register_operand" "=r")
2363 [(match_operand:P 1 "register_operand" "r")
2364 (match_operand:P 2 "register_operand" "r")
2365 (match_operand:P 3 "symbolic_operand" "")]
2368 "add\t%0,%1,%2,%%tprel_add(%3)"
2369 [(set_attr "type" "arith")
2370 (set_attr "mode" "<MODE>")])
;; TLS global-dynamic GOT load.
2372 (define_insn "got_load_tls_gd<mode>"
2373 [(set (match_operand:P 0 "register_operand" "=r")
2375 [(match_operand:P 1 "symbolic_operand" "")]
2379 [(set_attr "got" "load")
2380 (set_attr "type" "load")
2381 (set_attr "mode" "<MODE>")])
;; TLS initial-exec GOT load.
2383 (define_insn "got_load_tls_ie<mode>"
2384 [(set (match_operand:P 0 "register_operand" "=r")
2386 [(match_operand:P 1 "symbolic_operand" "")]
2390 [(set_attr "got" "load")
2391 (set_attr "type" "load")
2392 (set_attr "mode" "<MODE>")])
;; TLS descriptor (TLSDESC) 4-insn call sequence: result lands in a0,
;; t0 is clobbered by the indirect call; .LT%= labels anchor the
;; tlsdesc_* relocations.
2394 (define_insn "@tlsdesc<mode>"
2395 [(set (reg:P A0_REGNUM)
2397 [(match_operand:P 0 "symbolic_operand" "")]
2399 (clobber (reg:P T0_REGNUM))]
2402 return ".LT%=: auipc\ta0,%%tlsdesc_hi(%0)\;"
2403 "<load>\tt0,%%tlsdesc_load_lo(.LT%=)(a0)\;"
2404 "addi\ta0,a0,%%tlsdesc_add_lo(.LT%=)\;"
2405 "jalr\tt0,t0,%%tlsdesc_call(.LT%=)";
2407 [(set_attr "type" "multi")
2408 (set_attr "length" "16")
2409 (set_attr "mode" "<MODE>")])
;; PC-relative high part (%h1).  cannot_copy: the .LA%2 label must stay
;; unique so the paired low-part relocation resolves correctly.
2411 (define_insn "auipc<mode>"
2412 [(set (match_operand:P 0 "register_operand" "=r")
2414 [(match_operand:P 1 "symbolic_operand" "")
2415 (match_operand:P 2 "const_int_operand")
2419 ".LA%2: auipc\t%0,%h1"
2420 [(set_attr "type" "auipc")
2421 (set_attr "cannot_copy" "yes")])
2423 ;; Instructions for adding the low 12 bits of an address to a register.
2424 ;; Operand 2 is the address: riscv_print_operand works out which relocation
2425 ;; should be applied.
2427 (define_insn "*low<mode>"
2428 [(set (match_operand:P 0 "register_operand" "=r")
2429 (lo_sum:P (match_operand:P 1 "register_operand" " r")
2430 (match_operand:P 2 "symbolic_operand" "")))]
2433 [(set_attr "type" "arith")
2434 (set_attr "mode" "<MODE>")])
2436 ;; Allow combine to split complex const_int load sequences, using operand 2
2437 ;; to store the intermediate results. See move_operand for details.
;; NOTE(review): the define_split header line itself is not visible in
;; this extract (embedded numbering jumps 2437 -> 2439).
2439 [(set (match_operand:GPR 0 "register_operand")
2440 (match_operand:GPR 1 "splittable_const_int_operand"))
2441 (clobber (match_operand:GPR 2 "register_operand"))]
2445 riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]),
2450 ;; Likewise, for symbolic operands.
;; NOTE(review): define_split header line also not visible here.
2452 [(set (match_operand:P 0 "register_operand")
2453 (match_operand:P 1))
2454 (clobber (match_operand:P 2 "register_operand"))]
2455 "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
2456 [(set (match_dup 0) (match_dup 3))]
2458 riscv_split_symbol (operands[2], operands[1],
2459 MAX_MACHINE_MODE, &operands[3]);
2462 ;; Pretend to have the ability to load complex const_int in order to get
2463 ;; better code generation around them.
2464 ;; But avoid constants that are special cased elsewhere.
2466 ;; Hide it from IRA register equiv recog* () to elide potential undoing of split
;; Excludes constants handled by dedicated shift/mask/single-bit
;; patterns (see the p2m1/high_mask/exact_log2 tests below).
2468 (define_insn_and_split "*mvconst_internal"
2469 [(set (match_operand:GPR 0 "register_operand" "=r")
2470 (match_operand:GPR 1 "splittable_const_int_operand" "i"))]
2472 && !(p2m1_shift_operand (operands[1], <MODE>mode)
2473 || high_mask_shift_operand (operands[1], <MODE>mode)
2474 || exact_log2 (INTVAL (operands[1])) >= 0)"
2479 riscv_move_integer (operands[0], operands[0], INTVAL (operands[1]),
2483 [(set_attr "type" "move")])
2485 ;; 64-bit integer moves
;; Expander: let riscv_legitimize_move () handle illegitimate moves.
2487 (define_expand "movdi"
2488 [(set (match_operand:DI 0 "")
2489 (match_operand:DI 1 ""))]
2492 if (riscv_legitimize_move (DImode, operands[0], operands[1]))
;; DImode move on RV32: GPR pairs, D-extension FPRs, and the vector
;; VL pseudo source (vp constraint, rdvlenb).
2496 (define_insn "*movdi_32bit"
2497 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m,r")
2498 (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f,vp"))]
2500 && (register_operand (operands[0], DImode)
2501 || reg_or_0_operand (operands[1], DImode))"
2502 { return riscv_output_move (operands[0], operands[1]); }
2503 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2504 (set_attr "mode" "DI")
2505 (set_attr "type" "move,move,load,store,move,fpload,move,fmove,fpstore,move")
2506 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
;; DImode move on RV64.
2508 (define_insn "*movdi_64bit"
2509 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*f,*m,r")
2510 (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f,vp"))]
2512 && (register_operand (operands[0], DImode)
2513 || reg_or_0_operand (operands[1], DImode))"
2514 { return riscv_output_move (operands[0], operands[1]); }
2515 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2516 (set_attr "mode" "DI")
2517 (set_attr "type" "move,move,load,store,mtc,fpload,mfc,fmove,fpstore,move")
2518 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
2520 ;; 32-bit Integer moves
;; Expander for the MOVE32 modes.
2522 (define_expand "mov<mode>"
2523 [(set (match_operand:MOVE32 0 "")
2524 (match_operand:MOVE32 1 ""))]
2527 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; SImode move; a raw VL register source is rejected here (only the
;; dedicated vp alternative may read it).
2531 (define_insn "*movsi_internal"
2532 [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*m,r")
2533 (match_operand:SI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,vp"))]
2534 "(register_operand (operands[0], SImode)
2535 || reg_or_0_operand (operands[1], SImode))
2536 && !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
2537 { return riscv_output_move (operands[0], operands[1]); }
2538 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore,rdvlenb")
2539 (set_attr "mode" "SI")
2540 (set_attr "type" "move,move,load,store,mtc,fpload,mfc,fpstore,move")
2541 (set_attr "ext" "base,base,base,base,f,f,f,f,vector")])
2543 ;; 16-bit Integer moves
2545 ;; Unlike most other insns, the move insns can't be split with
2546 ;; different predicates, because register spilling and other parts of
2547 ;; the compiler, have memoized the insn number already.
2548 ;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
2550 (define_expand "movhi"
2551 [(set (match_operand:HI 0 "")
2552 (match_operand:HI 1 ""))]
2555 if (riscv_legitimize_move (HImode, operands[0], operands[1]))
;; HImode move.
2559 (define_insn "*movhi_internal"
2560 [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2561 (match_operand:HI 1 "move_operand" " r,T,m,rJ,*r*J,*f,vp"))]
2562 "(register_operand (operands[0], HImode)
2563 || reg_or_0_operand (operands[1], HImode))"
2564 { return riscv_output_move (operands[0], operands[1]); }
2565 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2566 (set_attr "mode" "HI")
2567 (set_attr "type" "move,move,load,store,mtc,mfc,move")
2568 (set_attr "ext" "base,base,base,base,f,f,vector")])
2570 ;; HImode constant generation; see riscv_move_integer for details.
2571 ;; si+si->hi without truncation is legal because of
2572 ;; TARGET_TRULY_NOOP_TRUNCATION.
;; HImode add with HI/SI inputs; %i2 prints "i" for an immediate
;; operand and %~ appends "w" on RV64.
2574 (define_insn "*add<mode>hi3"
2575 [(set (match_operand:HI 0 "register_operand" "=r,r")
2576 (plus:HI (match_operand:HISI 1 "register_operand" " r,r")
2577 (match_operand:HISI 2 "arith_operand" " r,I")))]
2579 "add%i2%~\t%0,%1,%2"
2580 [(set_attr "type" "arith")
2581 (set_attr "mode" "HI")])
;; HImode xor with HI/SI inputs (output template not visible in this
;; extract -- embedded numbering skips 2587/2588).
2583 (define_insn "*xor<mode>hi3"
2584 [(set (match_operand:HI 0 "register_operand" "=r,r")
2585 (xor:HI (match_operand:HISI 1 "register_operand" " r,r")
2586 (match_operand:HISI 2 "arith_operand" " r,I")))]
2589 [(set_attr "type" "logical")
2590 (set_attr "mode" "HI")])
2592 ;; 8-bit Integer moves
;; Expander: let riscv_legitimize_move () handle illegitimate moves.
2594 (define_expand "movqi"
2595 [(set (match_operand:QI 0 "")
2596 (match_operand:QI 1 ""))]
2599 if (riscv_legitimize_move (QImode, operands[0], operands[1]))
;; QImode move.
2603 (define_insn "*movqi_internal"
2604 [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2605 (match_operand:QI 1 "move_operand" " r,I,m,rJ,*r*J,*f,vp"))]
2606 "(register_operand (operands[0], QImode)
2607 || reg_or_0_operand (operands[1], QImode))"
2608 { return riscv_output_move (operands[0], operands[1]); }
2609 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2610 (set_attr "mode" "QI")
2611 (set_attr "type" "move,move,load,store,mtc,mfc,move")
2612 (set_attr "ext" "base,base,base,base,f,f,vector")])
2614 ;; 32-bit floating point moves
;; Expander: let riscv_legitimize_move () handle illegitimate moves.
2616 (define_expand "movsf"
2617 [(set (match_operand:SF 0 "")
2618 (match_operand:SF 1 ""))]
2621 if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
;; SFmode move with FP registers (zfli = Zfa FP load-immediate consts).
2625 (define_insn "*movsf_hardfloat"
2626 [(set (match_operand:SF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2627 (match_operand:SF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2629 && (register_operand (operands[0], SFmode)
2630 || reg_or_0_operand (operands[1], SFmode))"
2631 { return riscv_output_move (operands[0], operands[1]); }
2632 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2633 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2634 (set_attr "mode" "SF")])
;; SFmode move without FP registers (GPRs and memory only).
2636 (define_insn "*movsf_softfloat"
2637 [(set (match_operand:SF 0 "nonimmediate_operand" "= r,r,m")
2638 (match_operand:SF 1 "move_operand" " Gr,m,r"))]
2640 && (register_operand (operands[0], SFmode)
2641 || reg_or_0_operand (operands[1], SFmode))"
2642 { return riscv_output_move (operands[0], operands[1]); }
2643 [(set_attr "move_type" "move,load,store")
2644 (set_attr "type" "move,load,store")
2645 (set_attr "mode" "SF")])
2647 ;; 64-bit floating point moves
;; Expander: let riscv_legitimize_move () handle illegitimate moves.
2649 (define_expand "movdf"
2650 [(set (match_operand:DF 0 "")
2651 (match_operand:DF 1 ""))]
2654 if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
2659 ;; In RV32, we lack fmv.x.d and fmv.d.x. Go through memory instead.
2660 ;; (However, we can still use fcvt.d.w to zero a floating-point register.)
;; DFmode move on RV32+D; zmvf/zmvr constraints enable the Zfa
;; FPR<->GPR-pair alternatives instead of the memory round trip.
2661 (define_insn "*movdf_hardfloat_rv32"
2662 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*zmvf,*zmvr, *r,*r,*th_m_noi")
2663 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*zmvr,*zmvf,*r*G,*th_m_noi,*r"))]
2664 "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
2665 && (register_operand (operands[0], DFmode)
2666 || reg_or_0_operand (operands[1], DFmode))"
2667 { return riscv_output_move (operands[0], operands[1]); }
2668 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2669 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2670 (set_attr "mode" "DF")])
;; DFmode move on RV64+D.
2672 (define_insn "*movdf_hardfloat_rv64"
2673 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2674 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*r*G,*m,*r"))]
2675 "TARGET_64BIT && TARGET_DOUBLE_FLOAT
2676 && (register_operand (operands[0], DFmode)
2677 || reg_or_0_operand (operands[1], DFmode))"
2678 { return riscv_output_move (operands[0], operands[1]); }
2679 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2680 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2681 (set_attr "mode" "DF")])
;; DFmode move without the D extension (GPRs and memory only).
2683 (define_insn "*movdf_softfloat"
2684 [(set (match_operand:DF 0 "nonimmediate_operand" "= r,r, m")
2685 (match_operand:DF 1 "move_operand" " rG,m,rG"))]
2686 "!TARGET_DOUBLE_FLOAT
2687 && (register_operand (operands[0], DFmode)
2688 || reg_or_0_operand (operands[1], DFmode))"
2689 { return riscv_output_move (operands[0], operands[1]); }
2690 [(set_attr "move_type" "move,load,store")
2691 (set_attr "type" "fmove,fpload,fpstore")
2692 (set_attr "mode" "DF")])
;; Zfa RV32: extract the low 32 bits of a DFmode FP register into a
;; GPR (output template not visible in this extract).
2694 (define_insn "movsidf2_low_rv32"
2695 [(set (match_operand:SI 0 "register_operand" "= r")
2697 [(match_operand:DF 1 "register_operand" "zmvf")]
2699 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2701 [(set_attr "move_type" "fmove")
2702 (set_attr "type" "fmove")
2703 (set_attr "mode" "DF")])
;; Zfa RV32: extract the high 32 bits of a DFmode FP register into a
;; GPR (output template not visible in this extract).
2706 (define_insn "movsidf2_high_rv32"
2707 [(set (match_operand:SI 0 "register_operand" "= r")
2709 [(match_operand:DF 1 "register_operand" "zmvf")]
2711 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2713 [(set_attr "move_type" "fmove")
2714 (set_attr "type" "fmove")
2715 (set_attr "mode" "DF")])
;; Zfa RV32: build a DFmode FP value from two GPR halves (fmvp.d.x).
2717 (define_insn "movdfsisi3_rv32"
2718 [(set (match_operand:DF 0 "register_operand" "= f")
2720 (match_operand:SI 2 "register_operand" "zmvr")
2722 (match_operand:SI 1 "register_operand" "zmvr")
2724 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2725 "fmvp.d.x\t%0,%2,%1"
2726 [(set_attr "move_type" "fmove")
2727 (set_attr "type" "fmove")
2728 (set_attr "mode" "DF")])
;; Split doubleword (MOVE64) moves into two word-sized operations.
;; NOTE(review): the define_split header line is not visible here.
2731 [(set (match_operand:MOVE64 0 "nonimmediate_operand")
2732 (match_operand:MOVE64 1 "move_operand"))]
2734 && riscv_split_64bit_move_p (operands[0], operands[1])"
2737 riscv_split_doubleword_move (operands[0], operands[1]);
;; memcmp expansion: try the vector expander first, then the scalar
;; block-compare; the word_mode result is treated as a sign-extended
;; promoted subreg when narrowed to SImode.
2741 (define_expand "cmpmemsi"
2742 [(parallel [(set (match_operand:SI 0)
2743 (compare:SI (match_operand:BLK 1)
2744 (match_operand:BLK 2)))
2745 (use (match_operand:SI 3))
2746 (use (match_operand:SI 4))])]
2749 /* If TARGET_VECTOR is false, this routine will return false and we will
2750 try scalar expansion. */
2751 if (riscv_vector::expand_vec_cmpmem (operands[0], operands[1],
2752 operands[2], operands[3]))
2755 rtx temp = gen_reg_rtx (word_mode);
2756 if (riscv_expand_block_compare (temp, operands[1], operands[2],
2761 temp = gen_lowpart (SImode, temp);
2762 SUBREG_PROMOTED_VAR_P (temp) = 1;
2763 SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
2765 emit_move_insn (operands[0], temp);
;; memcpy-style block copy; falls back to the library call when the
;; target expansion declines.
2772 (define_expand "cpymem<mode>"
2773 [(parallel [(set (match_operand:BLK 0 "general_operand")
2774 (match_operand:BLK 1 "general_operand"))
2775 (use (match_operand:P 2 ""))
2776 (use (match_operand:SI 3 "const_int_operand"))])]
2779 if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
2785 ;; Fill memory with constant byte.
2786 ;; Argument 0 is the destination
2787 ;; Argument 1 is the constant byte
2788 ;; Argument 2 is the length
2789 ;; Argument 3 is the alignment
;; memset expansion: vector path first; the scalar path only handles
;; zero fill (riscv_expand_block_clear), else use the library.
2791 (define_expand "setmem<mode>"
2792 [(parallel [(set (match_operand:BLK 0 "memory_operand")
2793 (match_operand:QI 2 "nonmemory_operand"))
2794 (use (match_operand:P 1 ""))
2795 (use (match_operand:SI 3 "const_int_operand"))])]
2798 /* If TARGET_VECTOR is false, this routine will return false and we will
2799 try scalar expansion. */
2800 if (riscv_vector::expand_vec_setmem (operands[0], operands[1], operands[2]))
2803 /* If value to set is not zero, use the library routine. */
2804 if (operands[2] != const0_rtx)
2807 if (riscv_expand_block_clear (operands[0], operands[1]))
;; memmove-style (overlap-tolerant) block move via the vector expander.
2813 (define_expand "movmem<mode>"
2814 [(parallel [(set (match_operand:BLK 0 "general_operand")
2815 (match_operand:BLK 1 "general_operand"))
2816 (use (match_operand:P 2 "const_int_operand"))
2817 (use (match_operand:SI 3 "const_int_operand"))])]
2820 if (riscv_vector::expand_block_move (operands[0], operands[1], operands[2],
2827 ;; Expand in-line code to clear the instruction cache between operand[0] and
;; Use the platform's icache-flush function when one is configured,
;; otherwise emit fence.i when Zifencei is available.
2829 (define_expand "clear_cache"
2830 [(match_operand 0 "pmode_register_operand")
2831 (match_operand 1 "pmode_register_operand")]
2834 #ifdef ICACHE_FLUSH_FUNC
2835 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, ICACHE_FLUSH_FUNC),
2836 LCT_NORMAL, VOIDmode, operands[0], Pmode,
2837 operands[1], Pmode, const0_rtx, Pmode);
2839 if (TARGET_ZIFENCEI)
2840 emit_insn (gen_fence_i ());
;; Memory fence (UNSPECV_FENCE).
2845 (define_insn "fence"
2846 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE)]
2849 [(set_attr "type" "atomic")])
;; Instruction-stream fence (Zifencei fence.i).
2851 (define_insn "fence_i"
2852 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE_I)]
2855 [(set_attr "type" "atomic")])
;; Zihintpause "pause" hint; fall back to the raw encoding when the
;; assembler may not know the mnemonic.
2857 (define_insn "riscv_pause"
2858 [(unspec_volatile [(const_int 0)] UNSPECV_PAUSE)]
2860 "* return TARGET_ZIHINTPAUSE ? \"pause\" : \".insn\t0x0100000f\";"
2861 [(set_attr "type" "atomic")])
2864 ;; ....................
2868 ;; ....................
2870 ;; Use a QImode shift count, to avoid generating sign or zero extend
2871 ;; instructions for shift counts, and to avoid dropping subregs.
2872 ;; expand_shift_1 can do this automatically when SHIFT_COUNT_TRUNCATED is
2873 ;; defined, but use of that is discouraged.
;; SImode shift; constant counts are masked to 5 bits at output time.
2875 (define_insn "*<optab>si3"
2876 [(set (match_operand:SI 0 "register_operand" "= r")
2878 (match_operand:SI 1 "register_operand" " r")
2879 (match_operand:QI 2 "arith_operand" " rI")))]
2882 if (GET_CODE (operands[2]) == CONST_INT)
2883 operands[2] = GEN_INT (INTVAL (operands[2])
2884 & (GET_MODE_BITSIZE (SImode) - 1));
2886 return "<insn>%i2%~\t%0,%1,%2";
2888 [(set_attr "type" "shift")
2889 (set_attr "mode" "SI")])
;; SImode shift expander; on RV64 compute into a DImode temporary via
;; the _extend form and mark the lowpart as a sign-extended subreg.
2891 (define_expand "<optab>si3"
2892 [(set (match_operand:SI 0 "register_operand" "= r")
2893 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2894 (match_operand:QI 2 "arith_operand" " rI")))]
2899 rtx t = gen_reg_rtx (DImode);
2900 emit_insn (gen_<optab>si3_extend (t, operands[1], operands[2]));
2901 t = gen_lowpart (SImode, t);
2902 SUBREG_PROMOTED_VAR_P (t) = 1;
2903 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2904 emit_move_insn (operands[0], t);
;; DImode shift; constant counts are masked to 6 bits at output time.
2909 (define_insn "<optab>di3"
2910 [(set (match_operand:DI 0 "register_operand" "= r")
2912 (match_operand:DI 1 "register_operand" " r")
2913 (match_operand:QI 2 "arith_operand" " rI")))]
2916 if (GET_CODE (operands[2]) == CONST_INT)
2917 operands[2] = GEN_INT (INTVAL (operands[2])
2918 & (GET_MODE_BITSIZE (DImode) - 1));
2920 return "<insn>%i2\t%0,%1,%2";
2922 [(set_attr "type" "shift")
2923 (set_attr "mode" "DI")])
;; Drop a redundant AND of the shift count with the mode's count mask
;; (<GPR:shiftm1>): the hardware shifter already truncates the count.
2925 (define_insn_and_split "*<optab><GPR:mode>3_mask_1"
2926 [(set (match_operand:GPR 0 "register_operand" "= r")
2928 (match_operand:GPR 1 "register_operand" " r")
2929 (match_operator 4 "subreg_lowpart_operator"
2931 (match_operand:GPR2 2 "register_operand" "r")
2932 (match_operand 3 "<GPR:shiftm1>"))])))]
2937 (any_shift:GPR (match_dup 1)
2939 "operands[2] = gen_lowpart (QImode, operands[2]);"
2940 [(set_attr "type" "shift")
2941 (set_attr "mode" "<GPR:MODE>")])
;; 32-bit shift on RV64 whose DImode result is the sign-extension of
;; the SImode shift; maps to the "w" shift instructions.
2943 (define_insn "<optab>si3_extend"
2944 [(set (match_operand:DI 0 "register_operand" "= r")
2946 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2947 (match_operand:QI 2 "arith_operand" " rI"))))]
2950 if (GET_CODE (operands[2]) == CONST_INT)
2951 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2953 return "<insn>%i2w\t%0,%1,%2";
2955 [(set_attr "type" "shift")
2956 (set_attr "mode" "SI")])
;; As above, but also dropping a redundant 5-bit count mask.
2958 (define_insn_and_split "*<optab>si3_extend_mask"
2959 [(set (match_operand:DI 0 "register_operand" "= r")
2962 (match_operand:SI 1 "register_operand" " r")
2963 (match_operator 4 "subreg_lowpart_operator"
2965 (match_operand:GPR 2 "register_operand" " r")
2966 (match_operand 3 "const_si_mask_operand"))]))))]
2972 (any_shift:SI (match_dup 1)
2974 "operands[2] = gen_lowpart (QImode, operands[2]);"
2975 [(set_attr "type" "shift")
2976 (set_attr "mode" "SI")])
2978 ;; We can reassociate the shift and bitwise operator which may allow us to
2979 ;; reduce the immediate operand of the bitwise operator into a range that
2980 ;; fits in a simm12.
2982 ;; We need to make sure that shifting does not lose any bits, particularly
2983 ;; for IOR/XOR. It probably doesn't matter for AND.
2985 ;; We also don't want to do this if the immediate already fits in a simm12
2986 ;; field, or it is a single bit operand and zbs is available.
2987 (define_insn_and_split "<optab>_shift_reverse<X:mode>"
2988 [(set (match_operand:X 0 "register_operand" "=r")
2989 (any_bitwise:X (ashift:X (match_operand:X 1 "register_operand" "r")
2990 (match_operand 2 "immediate_operand" "n"))
2991 (match_operand 3 "immediate_operand" "n")))]
2992 "(!SMALL_OPERAND (INTVAL (operands[3]))
2993 && SMALL_OPERAND (INTVAL (operands[3]) >> INTVAL (operands[2]))
2994 && (!TARGET_ZBS || popcount_hwi (INTVAL (operands[3])) > 1)
2995 && (INTVAL (operands[3]) & ((1ULL << INTVAL (operands[2])) - 1)) == 0)"
;; Split into (x OP (imm >> sh)) << sh so the immediate fits simm12.
2998 [(set (match_dup 0) (any_bitwise:X (match_dup 1) (match_dup 3)))
2999 (set (match_dup 0) (ashift:X (match_dup 0) (match_dup 2)))]
3001 operands[3] = GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2]));
3003 [(set_attr "type" "shift")
3004 (set_attr "mode" "<X:MODE>")])
3006 ;; Non-canonical, but can be formed by ree when combine is not successful at
3007 ;; producing one of the two canonical patterns below.
3008 (define_insn "*lshrsi3_zero_extend_1"
3009 [(set (match_operand:DI 0 "register_operand" "=r")
3011 (lshiftrt:SI (match_operand:SI 1 "register_operand" " r")
3012 (match_operand 2 "const_int_operand"))))]
3013 "TARGET_64BIT && (INTVAL (operands[2]) & 0x1f) > 0"
3015 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
3017 return "srliw\t%0,%1,%2";
3019 [(set_attr "type" "shift")
3020 (set_attr "mode" "SI")])
3022 ;; Canonical form for a sign/zero-extend of a logical right shift.
3023 ;; Special case: extract MSB bits of lower 32-bit word
3024 (define_insn "*lshrsi3_extend_2"
3025 [(set (match_operand:DI 0 "register_operand" "=r")
3026 (any_extract:DI (match_operand:DI 1 "register_operand" " r")
3027 (match_operand 2 "const_int_operand")
3028 (match_operand 3 "const_int_operand")))]
3029 "(TARGET_64BIT && (INTVAL (operands[3]) > 0)
3030 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
3032 return "<extract_sidi_shift>\t%0,%1,%3";
3034 [(set_attr "type" "shift")
3035 (set_attr "mode" "SI")])
3037 ;; Canonical form for a zero-extend of a logical right shift when the
3038 ;; shift count is 31.
;; (x:SI < 0) as DI is the MSB, i.e. srliw by 31.
3039 (define_insn "*lshrsi3_zero_extend_3"
3040 [(set (match_operand:DI 0 "register_operand" "=r")
3041 (lt:DI (match_operand:SI 1 "register_operand" " r")
3045 return "srliw\t%0,%1,31";
3047 [(set_attr "type" "shift")
3048 (set_attr "mode" "SI")])
3050 ;; Canonical form for a extend of a logical shift right (sign/zero extraction).
3051 ;; Special cases, that are ignored (handled elsewhere):
3052 ;; * Single-bit extraction (Zbs/XTheadBs)
3053 ;; * Single-bit extraction (Zicondops/XVentanaCondops)
3054 ;; * Single-bit extraction (SFB)
3055 ;; * Extraction instruction th.ext(u) (XTheadBb)
3056 ;; * lshrsi3_extend_2 (see above)
;; Bit-field extract lowered after reload to shift-left-then-shift-right
;; (arithmetic or logical per <extract_shift>) through a scratch reg.
3057 (define_insn_and_split "*<any_extract:optab><GPR:mode>3"
3058 [(set (match_operand:GPR 0 "register_operand" "=r")
3060 (match_operand:GPR 1 "register_operand" " r")
3061 (match_operand 2 "const_int_operand")
3062 (match_operand 3 "const_int_operand")))
3063 (clobber (match_scratch:GPR 4 "=&r"))]
3064 "!((TARGET_ZBS || TARGET_XTHEADBS || TARGET_ZICOND
3065 || TARGET_XVENTANACONDOPS || TARGET_SFB_ALU)
3066 && (INTVAL (operands[2]) == 1))
3069 && (INTVAL (operands[3]) > 0)
3070 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
3072 "&& reload_completed"
3074 (ashift:GPR (match_dup 1) (match_dup 2)))
3076 (<extract_shift>:GPR (match_dup 4) (match_dup 3)))]
;; Derive the two shift amounts from the field size and position.
3078 int regbits = GET_MODE_BITSIZE (GET_MODE (operands[0])).to_constant ();
3079 int sizebits = INTVAL (operands[2]);
3080 int startbits = INTVAL (operands[3]);
3081 int lshamt = regbits - sizebits - startbits;
3082 int rshamt = lshamt + startbits;
3083 operands[2] = GEN_INT (lshamt);
3084 operands[3] = GEN_INT (rshamt);
3086 [(set_attr "type" "shift")
3087 (set_attr "mode" "<GPR:MODE>")])
;; NOTE(review): embedded line numbers are non-contiguous — the
;; define_split headers and some closing lines were dropped by the dump;
;; code is kept byte-identical.
3089 ;; Handle AND with 2^N-1 for N from 12 to XLEN. This can be split into
3090 ;; two logical shifts. Otherwise it requires 3 instructions: lui,
3091 ;; xor/addi/srli, and.
3093 ;; Generating a temporary for the shift output gives better combiner results;
3094 ;; and also fixes a problem where op0 could be a paradoxical reg and shifting
3095 ;; by amounts larger than the size of the SUBREG_REG doesn't work.
3097 [(set (match_operand:GPR 0 "register_operand")
3098 (and:GPR (match_operand:GPR 1 "register_operand")
3099 (match_operand:GPR 2 "p2m1_shift_operand")))
3100 (clobber (match_operand:GPR 3 "register_operand"))]
;; Split: shift left to drop the high bits, then logical shift right by
;; the same amount to clear them.
3103 (ashift:GPR (match_dup 1) (match_dup 2)))
3105 (lshiftrt:GPR (match_dup 3) (match_dup 2)))]
3107 /* Op2 is a VOIDmode constant, so get the mode size from op1. */
3108 operands[2] = GEN_INT (GET_MODE_BITSIZE (GET_MODE (operands[1])).to_constant ()
3109 - exact_log2 (INTVAL (operands[2]) + 1));
3112 ;; Handle AND with 0xF...F0...0 where there are 32 to 63 zeros. This can be
3113 ;; split into two shifts. Otherwise it requires 3 instructions: li, sll, and.
3115 [(set (match_operand:DI 0 "register_operand")
3116 (and:DI (match_operand:DI 1 "register_operand")
3117 (match_operand:DI 2 "high_mask_shift_operand")))
3118 (clobber (match_operand:DI 3 "register_operand"))]
;; Split: logical shift right by the trailing-zero count, then shift left
;; by the same amount to zero the low bits.
3121 (lshiftrt:DI (match_dup 1) (match_dup 2)))
3123 (ashift:DI (match_dup 3) (match_dup 2)))]
3125 operands[2] = GEN_INT (ctz_hwi (INTVAL (operands[2])));
3128 ;; Handle SImode to DImode zero-extend combined with a left shift. This can
3129 ;; occur when unsigned int is used for array indexing. Split this into two
3130 ;; shifts. Otherwise we can get 3 shifts.
;; Disabled when Zba is available (Zba's slli.uw covers this in one insn).
3132 (define_insn_and_split "zero_extendsidi2_shifted"
3133 [(set (match_operand:DI 0 "register_operand" "=r")
3134 (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
3135 (match_operand:QI 2 "immediate_operand" "I"))
3136 (match_operand 3 "immediate_operand" "")))
3137 (clobber (match_scratch:DI 4 "=&r"))]
3138 "TARGET_64BIT && !TARGET_ZBA
3139 && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)"
3141 "&& reload_completed"
3143 (ashift:DI (match_dup 1) (const_int 32)))
3145 (lshiftrt:DI (match_dup 4) (match_dup 5)))]
3146 "operands[5] = GEN_INT (32 - (INTVAL (operands [2])));"
3147 [(set_attr "type" "shift")
3148 (set_attr "mode" "DI")])
3151 ;; ....................
3153 ;; CONDITIONAL BRANCHES
3155 ;; ....................
3157 ;; Conditional branches
;; NOTE(review): the dump dropped interior lines in this section (numbering
;; gaps); code is kept byte-identical.
;; Branch on (reg & big-mask) ==/!= 0 where the mask does not fit in a
;; 12-bit immediate: shift out the trailing zeros, AND with the now-small
;; mask, branch on the result.
3159 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_equals_zero"
3161 (if_then_else (match_operator 1 "equality_operator"
3162 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
3163 (match_operand 3 "shifted_const_arith_operand" "i"))
3165 (label_ref (match_operand 0 "" ""))
3167 (clobber (match_scratch:X 4 "=&r"))]
3168 "!SMALL_OPERAND (INTVAL (operands[3]))"
3170 "&& reload_completed"
3171 [(set (match_dup 4) (lshiftrt:X (subreg:X (match_dup 2) 0) (match_dup 6)))
3172 (set (match_dup 4) (and:X (match_dup 4) (match_dup 7)))
3173 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
3174 (label_ref (match_dup 0)) (pc)))]
3176 HOST_WIDE_INT mask = INTVAL (operands[3]);
3177 int trailing = ctz_hwi (mask);
3179 operands[6] = GEN_INT (trailing);
3180 operands[7] = GEN_INT (mask >> trailing);
3182 [(set_attr "type" "branch")])
;; Branch on (reg & mask1) ==/!= mask2 where both constants become small
;; after removing their common trailing zeros.
3184 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_<optab>_shifted"
3186 (if_then_else (any_eq
3187 (and:ANYI (match_operand:ANYI 1 "register_operand" "r")
3188 (match_operand 2 "shifted_const_arith_operand" "i"))
3189 (match_operand 3 "shifted_const_arith_operand" "i"))
3190 (label_ref (match_operand 0 "" ""))
3192 (clobber (match_scratch:X 4 "=&r"))
3193 (clobber (match_scratch:X 5 "=&r"))]
3194 "!SMALL_OPERAND (INTVAL (operands[2]))
3195 && !SMALL_OPERAND (INTVAL (operands[3]))
3196 && SMALL_AFTER_COMMON_TRAILING_SHIFT (INTVAL (operands[2]),
3197 INTVAL (operands[3]))"
3199 "&& reload_completed"
3200 [(set (match_dup 4) (ashiftrt:X (match_dup 1) (match_dup 7)))
3201 (set (match_dup 4) (and:X (match_dup 4) (match_dup 8)))
3202 (set (match_dup 5) (match_dup 9))
3203 (set (pc) (if_then_else (any_eq (match_dup 4) (match_dup 5))
3204 (label_ref (match_dup 0)) (pc)))]
3206 HOST_WIDE_INT mask1 = INTVAL (operands[2]);
3207 HOST_WIDE_INT mask2 = INTVAL (operands[3]);
3208 int trailing_shift = COMMON_TRAILING_ZEROS (mask1, mask2);
3210 operands[7] = GEN_INT (trailing_shift);
3211 operands[8] = GEN_INT (mask1 >> trailing_shift);
3212 operands[9] = GEN_INT (mask2 >> trailing_shift);
3214 [(set_attr "type" "branch")])
;; Branch on (reg & consecutive-bits-mask) ==/!= 0: isolate the run of set
;; bits with a left shift followed by a logical right shift, then branch.
3216 (define_insn_and_split "*branch<ANYI:mode>_shiftedmask_equals_zero"
3218 (if_then_else (match_operator 1 "equality_operator"
3219 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
3220 (match_operand 3 "consecutive_bits_operand" "i"))
3222 (label_ref (match_operand 0 "" ""))
3224 (clobber (match_scratch:X 4 "=&r"))]
3225 "(INTVAL (operands[3]) >= 0 || !partial_subreg_p (operands[2]))
3226 && popcount_hwi (INTVAL (operands[3])) > 1
3227 && !SMALL_OPERAND (INTVAL (operands[3]))"
3229 "&& reload_completed"
3230 [(set (match_dup 4) (ashift:X (subreg:X (match_dup 2) 0) (match_dup 6)))
3231 (set (match_dup 4) (lshiftrt:X (match_dup 4) (match_dup 7)))
3232 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
3233 (label_ref (match_dup 0)) (pc)))]
3235 unsigned HOST_WIDE_INT mask = INTVAL (operands[3]);
3236 int leading = clz_hwi (mask);
3237 int trailing = ctz_hwi (mask);
3239 operands[6] = GEN_INT (leading);
3240 operands[7] = GEN_INT (leading + trailing);
3242 [(set_attr "type" "branch")])
;; Generic compare-and-branch.  When the target is out of conditional-branch
;; range (length 12), invert the condition (%n1) to skip over an
;; unconditional far jump.
3244 (define_insn "*branch<mode>"
3247 (match_operator 1 "ordered_comparison_operator"
3248 [(match_operand:X 2 "register_operand" "r")
3249 (match_operand:X 3 "reg_or_0_operand" "rJ")])
3250 (label_ref (match_operand 0 "" ""))
3254 if (get_attr_length (insn) == 12)
3255 return "b%n1\t%2,%z3,1f; jump\t%l0,ra; 1:";
3257 return "b%C1\t%2,%z3,%l0";
3259 [(set_attr "type" "branch")
3260 (set_attr "mode" "none")])
3262 ;; Conditional move and add patterns.
;; NOTE(review): some interior lines were dropped by this dump (numbering
;; gaps, e.g. the FAIL/DONE lines); code is kept byte-identical.
;; mov<mode>cc: expand a conditional move via the target hook; requires one
;; of the conditional-move capable extensions (SFB, XTheadCondMov, Zicond).
3264 (define_expand "mov<mode>cc"
3265 [(set (match_operand:GPR 0 "register_operand")
3266 (if_then_else:GPR (match_operand 1 "comparison_operator")
3267 (match_operand:GPR 2 "movcc_operand")
3268 (match_operand:GPR 3 "movcc_operand")))]
3269 "TARGET_SFB_ALU || TARGET_XTHEADCONDMOV || TARGET_ZICOND_LIKE
3272 if (riscv_expand_conditional_move (operands[0], operands[1],
3273 operands[2], operands[3]))
;; add<mode>cc: op0 = (cmp ? op2+op3 : op2), built branchlessly as
;; op0 = ((scc-1 or -scc) & op3) + op2 using an all-ones/all-zeros mask
;; derived from the set-on-comparison result.
3279 (define_expand "add<mode>cc"
3280 [(match_operand:GPR 0 "register_operand")
3281 (match_operand 1 "comparison_operator")
3282 (match_operand:GPR 2 "arith_operand")
3283 (match_operand:GPR 3 "arith_operand")]
3286 rtx cmp = operands[1];
3287 rtx cmp0 = XEXP (cmp, 0);
3288 rtx cmp1 = XEXP (cmp, 1);
3289 machine_mode mode0 = GET_MODE (cmp0);
3291 /* We only handle word mode integer compares for now. */
3292 if (INTEGRAL_MODE_P (mode0) && mode0 != word_mode)
3295 enum rtx_code code = GET_CODE (cmp);
3296 rtx reg0 = gen_reg_rtx (<MODE>mode);
3297 rtx reg1 = gen_reg_rtx (<MODE>mode);
3298 rtx reg2 = gen_reg_rtx (<MODE>mode);
3299 bool invert = false;
3301 if (INTEGRAL_MODE_P (mode0))
3302 riscv_expand_int_scc (reg0, code, cmp0, cmp1, &invert);
3303 else if (FLOAT_MODE_P (mode0) && fp_scc_comparison (cmp, GET_MODE (cmp)))
3304 riscv_expand_float_scc (reg0, code, cmp0, cmp1, &invert);
;; reg1 = scc-1 (inverted case) or -scc: an all-zeros/all-ones mask.
3309 riscv_emit_binary (PLUS, reg1, reg0, constm1_rtx);
3311 riscv_emit_unary (NEG, reg1, reg0);
3312 riscv_emit_binary (AND, reg2, reg1, operands[3]);
3313 riscv_emit_binary (PLUS, operands[0], reg2, operands[2]);
3318 ;; Used to implement built-in functions.
;; NOTE(review): interior lines dropped by the dump throughout this section
;; (numbering gaps); code is kept byte-identical.
3319 (define_expand "condjump"
3321 (if_then_else (match_operand 0)
3322 (label_ref (match_operand 1))
;; Integer compare-and-branch expander; defers to the backend helper.
3325 (define_expand "@cbranch<mode>4"
3327 (if_then_else (match_operator 0 "comparison_operator"
3328 [(match_operand:BR 1 "register_operand")
3329 (match_operand:BR 2 "nonmemory_operand")])
3330 (label_ref (match_operand 3 ""))
3334 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
3335 operands[1], operands[2]);
;; FP compare-and-branch: native signed-order comparisons keep the parallel
;; form (scratch in op4 for the fcmp result); others go through the helper.
3339 (define_expand "@cbranch<ANYF:mode>4"
3340 [(parallel [(set (pc)
3341 (if_then_else (match_operator 0 "fp_branch_comparison"
3342 [(match_operand:ANYF 1 "register_operand")
3343 (match_operand:ANYF 2 "register_operand")])
3344 (label_ref (match_operand 3 ""))
3346 (clobber (match_operand 4 ""))])]
3347 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3349 if (!signed_order_operator (operands[0], GET_MODE (operands[0])))
3351 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
3352 operands[1], operands[2]);
3355 operands[4] = gen_reg_rtx (TARGET_64BIT ? DImode : SImode);
;; After reload: materialize the FP comparison into GPR op4, then branch on
;; op4 != 0.  The length attribute picks short/medium/far branch forms.
3358 (define_insn_and_split "*cbranch<ANYF:mode>4"
3360 (if_then_else (match_operator 1 "fp_native_comparison"
3361 [(match_operand:ANYF 2 "register_operand" "f")
3362 (match_operand:ANYF 3 "register_operand" "f")])
3363 (label_ref (match_operand 0 ""))
3365 (clobber (match_operand:X 4 "register_operand" "=r"))]
3366 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3368 "&& reload_completed"
3370 (match_op_dup:X 1 [(match_dup 2) (match_dup 3)]))
3372 (if_then_else (ne:X (match_dup 4) (const_int 0))
3373 (label_ref (match_operand 0))
3376 [(set_attr "type" "branch")
3377 (set (attr "length")
3378 (if_then_else (and (le (minus (match_dup 0) (pc))
3380 (le (minus (pc) (match_dup 0))
3383 (if_then_else (and (le (minus (match_dup 0) (pc))
3384 (const_int 1048564))
3385 (le (minus (pc) (match_dup 0))
3386 (const_int 1048576)))
;; FP != branch: computed as (a == b) then branch on the result being zero,
;; since there is no native fne comparison.
3390 (define_insn_and_split "*cbranch<ANYF:mode>4"
3392 (if_then_else (match_operator 1 "ne_operator"
3393 [(match_operand:ANYF 2 "register_operand" "f")
3394 (match_operand:ANYF 3 "register_operand" "f")])
3395 (label_ref (match_operand 0 ""))
3397 (clobber (match_operand:X 4 "register_operand" "=r"))]
3398 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3400 "&& reload_completed"
3402 (eq:X (match_dup 2) (match_dup 3)))
3404 (if_then_else (eq:X (match_dup 4) (const_int 0))
3405 (label_ref (match_operand 0))
3408 [(set_attr "type" "branch")
3409 (set (attr "length")
3410 (if_then_else (and (le (minus (match_dup 0) (pc))
3412 (le (minus (pc) (match_dup 0))
3415 (if_then_else (and (le (minus (match_dup 0) (pc))
3416 (const_int 1048564))
3417 (le (minus (pc) (match_dup 0))
3418 (const_int 1048576)))
;; Branch on a single bit: shift the bit to the sign position and branch on
;; the sign (EQ -> bge zero, NE -> blt zero).
3422 (define_insn_and_split "*branch_on_bit<X:mode>"
3425 (match_operator 0 "equality_operator"
3426 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
3428 (match_operand 3 "branch_on_bit_operand"))
3430 (label_ref (match_operand 1))
3432 (clobber (match_scratch:X 4 "=&r"))]
3437 (ashift:X (match_dup 2) (match_dup 3)))
3440 (match_op_dup 0 [(match_dup 4) (const_int 0)])
3441 (label_ref (match_operand 1))
3444 int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
3445 operands[3] = GEN_INT (shift);
3447 if (GET_CODE (operands[0]) == EQ)
3448 operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
3450 operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
3452 [(set_attr "type" "branch")])
;; Branch on a low bit-range: shift the range's top bit past the MSB so the
;; extracted field alone decides the comparison against zero.
3454 (define_insn_and_split "*branch_on_bit_range<X:mode>"
3457 (match_operator 0 "equality_operator"
3458 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
3459 (match_operand 3 "branch_on_bit_operand")
3462 (label_ref (match_operand 1))
3464 (clobber (match_scratch:X 4 "=&r"))]
3469 (ashift:X (match_dup 2) (match_dup 3)))
3472 (match_op_dup 0 [(match_dup 4) (const_int 0)])
3473 (label_ref (match_operand 1))
3476 operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
3478 [(set_attr "type" "branch")])
3481 ;; ....................
3483 ;; SETTING A REGISTER FROM A COMPARISON
3485 ;; ....................
3487 ;; Destination is always set in SI mode.
;; NOTE(review): interior lines were dropped by this dump (numbering gaps);
;; code is kept byte-identical.
;; Integer scc expander: lowered by riscv_expand_int_scc.
3489 (define_expand "cstore<mode>4"
3490 [(set (match_operand:SI 0 "register_operand")
3491 (match_operator:SI 1 "ordered_comparison_operator"
3492 [(match_operand:GPR 2 "register_operand")
3493 (match_operand:GPR 3 "nonmemory_operand")]))]
3496 riscv_expand_int_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; FP scc expander: lowered by riscv_expand_float_scc.
3501 (define_expand "cstore<mode>4"
3502 [(set (match_operand:SI 0 "register_operand")
3503 (match_operator:SI 1 "fp_scc_comparison"
3504 [(match_operand:ANYF 2 "register_operand")
3505 (match_operand:ANYF 3 "register_operand")]))]
3506 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3508 riscv_expand_float_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; Native FP compare into a GPR (feq/flt/fle via the %C operand modifier).
3513 (define_insn "*cstore<ANYF:mode><X:mode>4"
3514 [(set (match_operand:X 0 "register_operand" "=r")
3515 (match_operator:X 1 "fp_native_comparison"
3516 [(match_operand:ANYF 2 "register_operand" " f")
3517 (match_operand:ANYF 3 "register_operand" " f")]))]
3518 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3519 "f%C1.<fmt>\t%0,%2,%3"
3520 [(set_attr "type" "fcmp")
3521 (set_attr "mode" "<UNITMODE>")])
;; Quiet (non-exception-raising) FP compare.  Without Zfa this is emulated
;; by saving fflags, doing the signalling compare, and restoring fflags;
;; a final snan-feq is emitted when SNaNs must still raise.
3523 (define_expand "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4"
3524 [(set (match_operand:X 0 "register_operand")
3525 (unspec:X [(match_operand:ANYF 1 "register_operand")
3526 (match_operand:ANYF 2 "register_operand")]
3528 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3530 rtx op0 = operands[0];
3531 rtx op1 = operands[1];
3532 rtx op2 = operands[2];
3535 emit_insn (gen_f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa(op0, op1, op2));
3538 rtx tmp = gen_reg_rtx (SImode);
3539 rtx cmp = gen_rtx_<QUIET_PATTERN> (<X:MODE>mode, op1, op2);
3540 rtx frflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, const0_rtx),
3542 rtx fsflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, tmp),
3545 emit_insn (gen_rtx_SET (tmp, frflags));
3546 emit_insn (gen_rtx_SET (op0, cmp));
3547 emit_insn (fsflags);
3550 if (HONOR_SNANS (<ANYF:MODE>mode))
3551 emit_insn (gen_rtx_UNSPEC_VOLATILE (<ANYF:MODE>mode,
3552 gen_rtvec (2, op1, op2),
;; Zfa provides true quiet compares (fltq/fleq) in one instruction.
3557 (define_insn "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa"
3558 [(set (match_operand:X 0 "register_operand" "=r")
3560 [(match_operand:ANYF 1 "register_operand" " f")
3561 (match_operand:ANYF 2 "register_operand" " f")]
3563 "TARGET_HARD_FLOAT && TARGET_ZFA"
3564 "f<quiet_pattern>q.<fmt>\t%0,%1,%2"
3565 [(set_attr "type" "fcmp")
3566 (set_attr "mode" "<UNITMODE>")
3567 (set (attr "length") (const_int 16))])
3569 ;; fclass instruction output bitmap
3570 ;; 0 negative infinity
3571 ;; 1 negative normal number.
3572 ;; 2 negative subnormal number.
3575 ;; 5 positive subnormal number.
3576 ;; 6 positive normal number.
3577 ;; 7 positive infinity
3581 (define_insn "fclass<ANYF:mode><X:mode>"
3582 [(set (match_operand:X 0 "register_operand" "=r")
3583 (unspec [(match_operand:ANYF 1 "register_operand" " f")]
3586 "fclass.<fmt>\t%0,%1";
3587 [(set_attr "type" "fcmp")
3588 (set_attr "mode" "<UNITMODE>")])
3590 ;; Implements optab for isfinite, isnormal, isinf
;; The iterator values are fclass result masks: 126 = finite, 66 = normal,
;; 129 = infinity (negative|positive infinity bits).
3592 (define_int_iterator FCLASS_MASK [126 66 129])
3593 (define_int_attr fclass_optab
;; Expand is<kind> via fclass, AND with the class mask, compare != 0.
3598 (define_expand "<FCLASS_MASK:fclass_optab><ANYF:mode>2"
3599 [(match_operand 0 "register_operand" "=r")
3600 (match_operand:ANYF 1 "register_operand" " f")
3601 (const_int FCLASS_MASK)]
3604 if (GET_MODE (operands[0]) != SImode
3605 && GET_MODE (operands[0]) != word_mode)
3608 rtx t = gen_reg_rtx (word_mode);
3609 rtx t_op0 = gen_reg_rtx (word_mode);
3612 emit_insn (gen_fclass<ANYF:mode>di (t, operands[1]));
3614 emit_insn (gen_fclass<ANYF:mode>si (t, operands[1]));
3616 riscv_emit_binary (AND, t, t, GEN_INT (<FCLASS_MASK>));
3617 rtx cmp = gen_rtx_NE (word_mode, t, const0_rtx);
3618 emit_insn (gen_cstore<mode>4 (t_op0, cmp, t, const0_rtx));
3622 t_op0 = gen_lowpart (SImode, t_op0);
3623 SUBREG_PROMOTED_VAR_P (t_op0) = 1;
3624 SUBREG_PROMOTED_SET (t_op0, SRP_SIGNED);
3627 emit_move_insn (operands[0], t_op0);
;; Set-on-comparison-with-zero family (seqz/snez/slt variants).
3631 (define_insn "*seq_zero_<X:mode><GPR:mode>"
3632 [(set (match_operand:GPR 0 "register_operand" "=r")
3633 (eq:GPR (match_operand:X 1 "register_operand" " r")
3637 [(set_attr "type" "slt")
3638 (set_attr "mode" "<X:MODE>")])
3640 (define_insn "*sne_zero_<X:mode><GPR:mode>"
3641 [(set (match_operand:GPR 0 "register_operand" "=r")
3642 (ne:GPR (match_operand:X 1 "register_operand" " r")
3646 [(set_attr "type" "slt")
3647 (set_attr "mode" "<X:MODE>")])
3649 (define_insn "*sgt<u>_<X:mode><GPR:mode>"
3650 [(set (match_operand:GPR 0 "register_operand" "= r")
3651 (any_gt:GPR (match_operand:X 1 "register_operand" " r")
3652 (match_operand:X 2 "reg_or_0_operand" " rJ")))]
3655 [(set_attr "type" "slt")
3656 (set_attr "mode" "<X:MODE>")])
3658 (define_insn "*sge<u>_<X:mode><GPR:mode>"
3659 [(set (match_operand:GPR 0 "register_operand" "=r")
3660 (any_ge:GPR (match_operand:X 1 "register_operand" " r")
3663 "slti<u>\t%0,zero,%1"
3664 [(set_attr "type" "slt")
3665 (set_attr "mode" "<X:MODE>")])
3667 (define_insn "@slt<u>_<X:mode><GPR:mode>3"
3668 [(set (match_operand:GPR 0 "register_operand" "= r")
3669 (any_lt:GPR (match_operand:X 1 "register_operand" " r")
3670 (match_operand:X 2 "arith_operand" " rI")))]
3672 "slt%i2<u>\t%0,%1,%2"
3673 [(set_attr "type" "slt")
3674 (set_attr "mode" "<X:MODE>")])
;; x <= c is rewritten as x < c+1 (valid for sle_operand constants).
3676 (define_insn "*sle<u>_<X:mode><GPR:mode>"
3677 [(set (match_operand:GPR 0 "register_operand" "=r")
3678 (any_le:GPR (match_operand:X 1 "register_operand" " r")
3679 (match_operand:X 2 "sle_operand" "")))]
3682 operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
3683 return "slt%i2<u>\t%0,%1,%2";
3685 [(set_attr "type" "slt")
3686 (set_attr "mode" "<X:MODE>")])
3689 ;; ....................
3691 ;; UNCONDITIONAL BRANCHES
3693 ;; ....................
3695 ;; Unconditional branches.
;; NOTE(review): interior lines dropped by the dump (numbering gaps,
;; including this jump insn's define_insn header); code kept byte-identical.
3698 [(set (pc) (label_ref (match_operand 0 "" "")))]
3701 /* Hopefully this does not happen often as this is going
3702 to clobber $ra and muck up the return stack predictors. */
3703 if (get_attr_length (insn) == 8)
3704 return "jump\t%l0,ra";
3708 [(set_attr "type" "jump")
3709 (set_attr "mode" "none")])
;; Indirect jump; under Zicfilp (landing pads) the label register state is
;; set up and t2 marked as used before the jump.
3711 (define_expand "indirect_jump"
3712 [(set (pc) (match_operand 0 "register_operand"))]
3715 if (is_zicfilp_p ())
3716 emit_insn (gen_set_lpl (Pmode, const0_rtx));
3718 operands[0] = force_reg (Pmode, operands[0]);
3719 if (is_zicfilp_p ())
3720 emit_use (gen_rtx_REG (Pmode, T2_REGNUM));
3722 if (Pmode == SImode)
3723 emit_jump_insn (gen_indirect_jumpsi (operands[0]));
3725 emit_jump_insn (gen_indirect_jumpdi (operands[0]));
3730 (define_insn "indirect_jump<mode>"
3731 [(set (pc) (match_operand:P 0 "register_operand" "l"))]
3734 [(set_attr "type" "jalr")
3735 (set_attr "mode" "none")])
;; Switch-table dispatch.  PC-relative case vectors add the table label to
;; the entry first; Zicfilp routes the target through t2 (landing-pad ABI).
3737 (define_expand "tablejump"
3738 [(set (pc) (match_operand 0 "register_operand" ""))
3739 (use (label_ref (match_operand 1 "" "")))]
3742 if (CASE_VECTOR_PC_RELATIVE)
3743 operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
3744 gen_rtx_LABEL_REF (Pmode, operands[1]),
3745 NULL_RTX, 0, OPTAB_DIRECT);
3747 if (is_zicfilp_p ())
3749 rtx t2 = RISCV_CALL_ADDRESS_LPAD (GET_MODE (operands[0]));
3750 emit_move_insn (t2, operands[0]);
3752 if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
3753 emit_jump_insn (gen_tablejump_cfidi (operands[1]));
3755 emit_jump_insn (gen_tablejump_cfisi (operands[1]));
3759 if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
3760 emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
3762 emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
3767 (define_insn "tablejump<mode>"
3768 [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
3769 (use (label_ref (match_operand 1 "" "")))]
3772 [(set_attr "type" "jalr")
3773 (set_attr "mode" "none")])
;; Zicfilp variant: jump target is pinned in t2.
3775 (define_insn "tablejump_cfi<mode>"
3776 [(set (pc) (reg:GPR T2_REGNUM))
3777 (use (label_ref (match_operand 0 "")))]
3780 [(set_attr "type" "jalr")
3781 (set_attr "mode" "none")])
3784 ;; ....................
3786 ;; Function prologue/epilogue
3788 ;; ....................
;; NOTE(review): interior lines dropped by the dump (numbering gaps);
;; code kept byte-identical.
3791 (define_expand "prologue"
3795 riscv_expand_prologue ();
3799 ;; Block any insns from being moved before this point, since the
3800 ;; profiling call to mcount can use various registers that aren't
3801 ;; saved or used to pass arguments.
3803 (define_insn "blockage"
3804 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
3807 [(set_attr "type" "ghost")
3808 (set_attr "mode" "none")])
3810 (define_expand "epilogue"
3814 riscv_expand_epilogue (NORMAL_RETURN);
3818 (define_expand "sibcall_epilogue"
3822 riscv_expand_epilogue (SIBCALL_RETURN);
3826 ;; Trivial return. Make it look like a normal return insn as that
3827 ;; allows jump optimizations to work better.
3829 (define_expand "return"
3831 "riscv_can_use_return_insn ()"
3834 (define_insn "simple_return"
3838 return riscv_output_return ();
3840 [(set_attr "type" "jalr")
3841 (set_attr "mode" "none")])
;; Return through an arbitrary register (op0), used by epilogue code.
3845 (define_insn "simple_return_internal"
3847 (use (match_operand 0 "pmode_register_operand" ""))]
3850 [(set_attr "type" "jalr")
3851 (set_attr "mode" "none")])
3853 ;; This is used in compiling the unwind routines.
;; eh_return: store the new return address (widened to word_mode), then
;; emit the exception-return epilogue.
3854 (define_expand "eh_return"
3855 [(use (match_operand 0 "general_operand"))]
3858 if (GET_MODE (operands[0]) != word_mode)
3859 operands[0] = convert_to_mode (word_mode, operands[0], 0);
3861 emit_insn (gen_eh_set_lr_di (operands[0]));
3863 emit_insn (gen_eh_set_lr_si (operands[0]));
3865 emit_jump_insn (gen_eh_return_internal ());
3870 ;; Clobber the return address on the stack. We can't expand this
3871 ;; until we know where it will be put in the stack frame.
3873 (define_insn "eh_set_lr_si"
3874 [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3875 (clobber (match_scratch:SI 1 "=&r"))]
3878 [(set_attr "type" "jump")])
3880 (define_insn "eh_set_lr_di"
3881 [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3882 (clobber (match_scratch:DI 1 "=&r"))]
3885 [(set_attr "type" "jump")])
;; Split (header line missing in this dump) that writes the return address
;; into its final stack-frame slot once the frame layout is known.
3888 [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
3889 (clobber (match_scratch 1))]
3893 riscv_set_return_address (operands[0], operands[1]);
3897 (define_insn_and_split "eh_return_internal"
3901 "epilogue_completed"
3903 "riscv_expand_epilogue (EXCEPTION_RETURN); DONE;"
3904 [(set_attr "type" "ret")])
3907 ;; ....................
3911 ;; ....................
;; NOTE(review): interior lines dropped by the dump (numbering gaps,
;; including the section title and output templates); code byte-identical.
;; All call patterns carry the callee's calling convention as a const_int
;; operand inside an UNSPEC_CALLEE_CC, so the RA knows the clobber set.
3913 (define_expand "sibcall"
3914 [(parallel [(call (match_operand 0 "")
3915 (match_operand 1 ""))
3917 (match_operand 2 "const_int_operand")
3918 ] UNSPEC_CALLEE_CC))])]
3921 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3922 emit_call_insn (gen_sibcall_internal (target, operands[1], operands[2]));
3926 (define_insn "sibcall_internal"
3927 [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S,U"))
3928 (match_operand 1 "" ""))
3930 (match_operand 2 "const_int_operand")
3931 ] UNSPEC_CALLEE_CC))]
3932 "SIBLING_CALL_P (insn)"
3937 [(set_attr "type" "call")])
3939 (define_expand "sibcall_value"
3940 [(parallel [(set (match_operand 0 "")
3941 (call (match_operand 1 "")
3942 (match_operand 2 "")))
3944 (match_operand 3 "const_int_operand")
3945 ] UNSPEC_CALLEE_CC))])]
3948 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3949 emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2],
3954 (define_insn "sibcall_value_internal"
3955 [(set (match_operand 0 "" "")
3956 (call (mem:SI (match_operand 1 "call_insn_operand" "j,S,U"))
3957 (match_operand 2 "" "")))
3959 (match_operand 3 "const_int_operand")
3960 ] UNSPEC_CALLEE_CC))]
3961 "SIBLING_CALL_P (insn)"
3966 [(set_attr "type" "call")])
;; Normal calls: like sibcalls but they clobber the return-address register.
3968 (define_expand "call"
3969 [(parallel [(call (match_operand 0 "")
3970 (match_operand 1 ""))
3972 (match_operand 2 "const_int_operand")
3973 ] UNSPEC_CALLEE_CC))])]
3976 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3977 emit_call_insn (gen_call_internal (target, operands[1], operands[2]));
3981 (define_insn "call_internal"
3982 [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S,U"))
3983 (match_operand 1 "" ""))
3985 (match_operand 2 "const_int_operand")
3986 ] UNSPEC_CALLEE_CC))
3987 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3993 [(set_attr "type" "call")])
3995 (define_expand "call_value"
3996 [(parallel [(set (match_operand 0 "")
3997 (call (match_operand 1 "")
3998 (match_operand 2 "")))
4000 (match_operand 3 "const_int_operand")
4001 ] UNSPEC_CALLEE_CC))])]
4004 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
4005 emit_call_insn (gen_call_value_internal (operands[0], target, operands[2],
4010 (define_insn "call_value_internal"
4011 [(set (match_operand 0 "" "")
4012 (call (mem:SI (match_operand 1 "call_insn_operand" "l,S,U"))
4013 (match_operand 2 "" "")))
4015 (match_operand 3 "const_int_operand")
4016 ] UNSPEC_CALLEE_CC))
4017 (clobber (reg:SI RETURN_ADDR_REGNUM))]
4023 [(set_attr "type" "call")])
4025 ;; Call subroutine returning any type.
4027 (define_expand "untyped_call"
4028 [(parallel [(call (match_operand 0 "")
4030 (match_operand 1 "")
4031 (match_operand 2 "")])]
4036 /* Untyped calls always use the RISCV_CC_BASE calling convention. */
4037 emit_call_insn (gen_call (operands[0], const0_rtx,
4038 gen_int_mode (RISCV_CC_BASE, SImode)));
;; Copy each potential result register into its destination slot.
4040 for (i = 0; i < XVECLEN (operands[2], 0); i++)
4042 rtx set = XVECEXP (operands[2], 0, i);
4043 riscv_emit_move (SET_DEST (set), SET_SRC (set));
4046 emit_insn (gen_blockage ());
4054 [(set_attr "type" "nop")
4055 (set_attr "mode" "none")])
;; Unconditional trap (header line missing in this dump).
4058 [(trap_if (const_int 1) (const_int 0))]
4061 [(set_attr "type" "trap")])
4063 ;; Must use the registers that we save to prevent the rename reg optimization
4064 ;; pass from using them before the gpr_save pattern when shrink wrapping
4065 ;; occurs. See bug 95252 for instance.
;; NOTE(review): interior lines dropped by the dump (numbering gaps,
;; several output templates missing); code kept byte-identical.
;; -msave-restore support: call out to the libgcc save/restore millicode.
4067 (define_insn "gpr_save"
4068 [(match_parallel 1 "gpr_save_operation"
4069 [(unspec_volatile [(match_operand 0 "const_int_operand")]
4070 UNSPECV_GPR_SAVE)])]
4072 "call\tt0,__riscv_save_%0"
4073 [(set_attr "type" "call")])
4075 (define_insn "gpr_restore"
4076 [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)]
4078 "tail\t__riscv_restore_%0"
4079 [(set_attr "type" "call")])
4081 (define_insn "gpr_restore_return"
4083 (use (match_operand 0 "pmode_register_operand" ""))
4087 [(set_attr "type" "ret")])
;; FP control/status register access (frcsr/fscsr/frflags/fsflags
;; templates; the template strings are among the dropped lines).
4089 (define_insn "riscv_frcsr"
4090 [(set (match_operand:SI 0 "register_operand" "=r")
4091 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRCSR))]
4092 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4094 [(set_attr "type" "fmove")])
4096 (define_insn "riscv_fscsr"
4097 [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_FSCSR)]
4098 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4100 [(set_attr "type" "fmove")])
4102 (define_insn "riscv_frflags"
4103 [(set (match_operand:SI 0 "register_operand" "=r")
4104 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRFLAGS))]
4105 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4107 [(set_attr "type" "fmove")])
4109 (define_insn "riscv_fsflags"
4110 [(unspec_volatile [(match_operand:SI 0 "csr_operand" "rK")] UNSPECV_FSFLAGS)]
4111 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4113 [(set_attr "type" "fmove")])
;; Signalling-NaN side effect: feq to zero raises invalid on SNaN inputs.
4115 (define_insn "*riscv_fsnvsnan<mode>2"
4116 [(unspec_volatile [(match_operand:ANYF 0 "register_operand" "f")
4117 (match_operand:ANYF 1 "register_operand" "f")]
4119 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4120 "feq.<fmt>\tzero,%0,%1"
4121 [(set_attr "type" "fcmp")
4122 (set_attr "mode" "<UNITMODE>")])
;; Privileged trap-return instructions.
4124 (define_insn "riscv_mret"
4126 (unspec_volatile [(const_int 0)] UNSPECV_MRET)]
4129 [(set_attr "type" "ret")])
4131 (define_insn "riscv_sret"
4133 (unspec_volatile [(const_int 0)] UNSPECV_SRET)]
4136 [(set_attr "type" "ret")])
4138 (define_insn "riscv_uret"
4140 (unspec_volatile [(const_int 0)] UNSPECV_URET)]
4143 [(set_attr "type" "ret")])
;; Zero-length scheduling barrier tying two registers together through a
;; blockage-style memory dependence; emits no code.
4145 (define_insn "stack_tie<mode>"
4146 [(set (mem:BLK (scratch))
4147 (unspec:BLK [(match_operand:X 0 "register_operand" "r")
4148 (match_operand:X 1 "register_operand" "r")]
4150 "!rtx_equal_p (operands[0], operands[1])"
4152 [(set_attr "type" "ghost")
4153 (set_attr "length" "0")]
;; NOTE(review): interior lines dropped by the dump (numbering gaps);
;; code kept byte-identical.
;; Non-local goto save: with Zicfiss shadow stacks the save area holds the
;; shadow stack pointer in slot 0 and the stack pointer in slot 1;
;; otherwise only the stack pointer is saved.
4156 (define_expand "save_stack_nonlocal"
4157 [(set (match_operand 0 "memory_operand")
4158 (match_operand 1 "register_operand"))]
4163 if (need_shadow_stack_push_pop_p ())
4165 /* Copy shadow stack pointer to the first slot
4166 and stack pointer to the second slot. */
4167 rtx ssp_slot = adjust_address (operands[0], word_mode, 0);
4168 stack_slot = adjust_address (operands[0], Pmode, UNITS_PER_WORD);
4170 rtx reg_ssp = force_reg (word_mode, const0_rtx);
4171 emit_insn (gen_ssrdp (word_mode, reg_ssp));
4172 emit_move_insn (ssp_slot, reg_ssp);
4175 stack_slot = adjust_address (operands[0], Pmode, 0);
4176 emit_move_insn (stack_slot, operands[1]);
4180 ;; This fixes a failure with gcc.c-torture/execute/pr64242.c at -O2 for a
4181 ;; 32-bit target when using -mtune=sifive-7-series. The first sched pass
4182 ;; runs before register elimination, and we have a non-obvious dependency
4183 ;; between a use of the soft fp and a set of the hard fp. We fix this by
4184 ;; emitting a clobber using the hard fp between the two insns.
;; Non-local goto restore: for Zicfiss, unwind the shadow stack (in <=4K
;; steps, validated with sspush/sspopchk) until it matches the saved value,
;; then restore the stack pointer.
4185 (define_expand "restore_stack_nonlocal"
4186 [(match_operand 0 "register_operand")
4187 (match_operand 1 "memory_operand")]
4192 if (need_shadow_stack_push_pop_p ())
4194 rtx t0 = gen_rtx_REG (Pmode, RISCV_PROLOGUE_TEMP_REGNUM);
4195 /* Restore shadow stack pointer from the first slot
4196 and stack pointer from the second slot. */
4197 rtx ssp_slot = adjust_address (operands[1], word_mode, 0);
4198 stack_slot = adjust_address (operands[1], Pmode, UNITS_PER_WORD);
4200 /* Get the current shadow stack pointer. */
4201 rtx cur_ssp = force_reg (word_mode, const0_rtx);
4202 emit_insn (gen_ssrdp (word_mode, cur_ssp));
4204 /* Compare and jump over adjustment code. */
4205 rtx noadj_label = gen_label_rtx ();
4206 emit_cmp_and_jump_insns (cur_ssp, const0_rtx, EQ, NULL_RTX,
4207 word_mode, 1, noadj_label);
4209 rtx loop_label = gen_label_rtx ();
4210 emit_label (loop_label);
4211 LABEL_NUSES (loop_label) = 1;
4213 /* Check if current ssp less than jump buffer ssp,
4214 so no loop is needed. */
4215 emit_cmp_and_jump_insns (ssp_slot, cur_ssp, LE, NULL_RTX,
4216 ptr_mode, 1, noadj_label);
4218 /* Advance by a maximum of 4K at a time to avoid unwinding
4219 past bounds of the shadow stack. */
4220 rtx reg_4096 = force_reg (word_mode, GEN_INT (4096));
4221 rtx cmp_ssp = gen_reg_rtx (word_mode);
4222 cmp_ssp = expand_simple_binop (ptr_mode, MINUS,
4224 cmp_ssp, 1, OPTAB_DIRECT);
4226 /* Update curr_ssp from jump buffer ssp. */
4227 emit_move_insn (cur_ssp, ssp_slot);
4228 emit_insn (gen_write_ssp (word_mode, cur_ssp));
4229 emit_jump_insn (gen_jump (loop_label));
4232 /* Adjust the ssp in a loop. */
4233 rtx cmp_4k_label = gen_label_rtx ();
4234 emit_label (cmp_4k_label);
4235 LABEL_NUSES (cmp_4k_label) = 1;
4237 /* Add 4k for curr_ssp. */
4238 cur_ssp = expand_simple_binop (ptr_mode, PLUS,
4240 cur_ssp, 1, OPTAB_DIRECT);
4241 emit_insn (gen_write_ssp (word_mode, cur_ssp));
4242 emit_insn (gen_sspush (Pmode, t0));
4243 emit_insn (gen_sspopchk (Pmode, t0));
4244 emit_jump_insn (gen_jump (loop_label));
4247 emit_label (noadj_label);
4248 LABEL_NUSES (noadj_label) = 1;
4251 stack_slot = adjust_address (operands[1], Pmode, 0);
4253 emit_move_insn (operands[0], stack_slot);
4254 /* Prevent the following hard fp restore from being moved before the move
4255 insn above which uses a copy of the soft fp reg. */
4256 emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
4260 ;; Named pattern for expanding thread pointer reference.
4261 (define_expand "get_thread_pointer<mode>"
4262 [(set (match_operand:P 0 "register_operand" "=r")
4267 ;; Named patterns for stack smashing protection.
4269 (define_expand "stack_protect_set"
4270 [(match_operand 0 "memory_operand")
4271 (match_operand 1 "memory_operand")]
4274 machine_mode mode = GET_MODE (operands[0]);
4275 if (riscv_stack_protector_guard == SSP_TLS)
4277 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
4278 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
4279 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
4280 operands[1] = gen_rtx_MEM (Pmode, addr);
4283 emit_insn ((mode == DImode
4284 ? gen_stack_protect_set_di
4285 : gen_stack_protect_set_si) (operands[0], operands[1]));
4289 ;; DO NOT SPLIT THIS PATTERN. It is important for security reasons that the
4290 ;; canary value does not live beyond the life of this sequence.
;; Loads the guard into scratch %2, stores it to the canary slot %0, then
;; immediately clears %2 with "li %2, 0" so the canary value never survives
;; in a register.  Three 4-byte instructions, hence length 12.
4291 (define_insn "stack_protect_set_<mode>"
4292 [(set (match_operand:GPR 0 "memory_operand" "=m")
4293 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")]
4295 (set (match_scratch:GPR 2 "=&r") (const_int 0))]
4297 "<load>\t%2, %1\;<store>\t%2, %0\;li\t%2, 0"
4298 [(set_attr "type" "multi")
4299 (set_attr "length" "12")])
;; stack_protect_test: compare the saved canary (operand 0) against the guard
;; (operand 1) and branch to operand 2 when they match.  The compare is done
;; as XOR into `result`; equality with zero means the canary is intact.
4301 (define_expand "stack_protect_test"
4302 [(match_operand 0 "memory_operand")
4303 (match_operand 1 "memory_operand")
4308 machine_mode mode = GET_MODE (operands[0]);
4310 result = gen_reg_rtx(mode);
4311 if (riscv_stack_protector_guard == SSP_TLS)
/* As in stack_protect_set: point operand 1 at guard-register + offset.  */
4313 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
4314 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
4315 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
4316 operands[1] = gen_rtx_MEM (Pmode, addr);
4318 emit_insn ((mode == DImode
4319 ? gen_stack_protect_test_di
4320 : gen_stack_protect_test_si) (result,
/* Branch to the "canary OK" label when result (the XOR) is zero.  */
4324 rtx cond = gen_rtx_EQ (VOIDmode, result, const0_rtx);
4325 emit_jump_insn (gen_cbranch4 (mode, cond, result, const0_rtx, operands[2]));
;; Loads the canary slot %1 and the guard %2, XORs them into %0 (zero iff
;; they match), then clears the scratch %3 so the guard value does not
;; linger in a register.  Four instructions but length 12 per the original.
;; NOTE(review): 4 insns x 4 bytes would be 16 — confirm the length attr.
4330 (define_insn "stack_protect_test_<mode>"
4331 [(set (match_operand:GPR 0 "register_operand" "=r")
4332 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")
4333 (match_operand:GPR 2 "memory_operand" "m")]
4335 (clobber (match_scratch:GPR 3 "=&r"))]
4337 "<load>\t%3, %1\;<load>\t%0, %2\;xor\t%0, %3, %0\;li\t%3, 0"
4338 [(set_attr "type" "multi")
4339 (set_attr "length" "12")])
;; Cache-block clean (Zicbom, presumably "cbo.clean"): operates on the cache
;; block addressed by operand 0.  Output template not visible in this extract.
4341 (define_insn "riscv_clean_<mode>"
4342 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4346 [(set_attr "type" "store")]
;; Cache-block flush (Zicbom, presumably "cbo.flush") on the block addressed
;; by operand 0.  Output template not visible in this extract.
4349 (define_insn "riscv_flush_<mode>"
4350 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4354 [(set_attr "type" "store")]
;; Cache-block invalidate (Zicbom, presumably "cbo.inval") on the block
;; addressed by operand 0.  Output template not visible in this extract.
4357 (define_insn "riscv_inval_<mode>"
4358 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4362 [(set_attr "type" "store")]
;; Cache-block zero (Zicboz, presumably "cbo.zero") on the block addressed
;; by operand 0.  Output template not visible in this extract.
4365 (define_insn "riscv_zero_<mode>"
4366 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4370 [(set_attr "type" "store")]
;; Standard "prefetch" pattern.  Operand 0 is the address, operand 1 selects
;; the kind (2 = read prefetch, 1 = write prefetch per the switch below),
;; operand 2 is the locality hint.  With Zihintntl and locality in [0, 2],
;; "%L2" prefixes a non-temporal-locality hint instruction, which doubles
;; the length (see the length attribute).
4373 (define_insn "prefetch"
4374 [(prefetch (match_operand 0 "address_operand" "r")
4375 (match_operand 1 "imm5_operand" "i")
4376 (match_operand 2 "const_int_operand" "n"))]
4379 switch (INTVAL (operands[1]))
4382 case 2: return TARGET_ZIHINTNTL ? "%L2prefetch.r\t%a0" : "prefetch.r\t%a0";
4383 case 1: return TARGET_ZIHINTNTL ? "%L2prefetch.w\t%a0" : "prefetch.w\t%a0";
4384 default: gcc_unreachable ();
4387 [(set_attr "type" "store")
4388 (set (attr "length") (if_then_else (and (match_test "TARGET_ZIHINTNTL")
4389 (match_test "IN_RANGE (INTVAL (operands[2]), 0, 2)"))
4391 (const_string "4")))])
;; Instruction prefetch (Zicbop, presumably "prefetch.i"): operand 0 is the
;; address, operand 1 a 5-bit immediate offset.  Template not visible here.
4393 (define_insn "riscv_prefetchi_<mode>"
4394 [(unspec_volatile:X [(match_operand:X 0 "address_operand" "r")
4395 (match_operand:X 1 "imm5_operand" "i")]
4399 [(set_attr "type" "store")])
;; Signed bit-field extract: operand 0 = operands[2] bits of operand 1,
;; starting at bit operands[3], sign-extended.  Condition and body are not
;; visible in this extract.
4401 (define_expand "extv<mode>"
4402 [(set (match_operand:GPR 0 "register_operand" "=r")
4403 (sign_extract:GPR (match_operand:GPR 1 "register_operand" "r")
4404 (match_operand 2 "const_int_operand")
4405 (match_operand 3 "const_int_operand")))]
;; Unsigned bit-field extract: operand 0 = operands[2] bits of operand 1
;; starting at bit operands[3], zero-extended.  The visible condition clause
;; restricts one path to widths < 8 with a bit position of 0.
4409 (define_expand "extzv<mode>"
4410 [(set (match_operand:GPR 0 "register_operand" "=r")
4411 (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r")
4412 (match_operand 2 "const_int_operand")
4413 (match_operand 3 "const_int_operand")))]
4417 && (INTVAL (operands[2]) < 8) && (INTVAL (operands[3]) == 0))
;; Widening multiply-accumulate: operand 0 = (SI)op1 * (SI)op2 + op3, where
;; op1/op2 are sign-extended HImode values.  Condition not visible here.
4421 (define_expand "maddhisi4"
4422 [(set (match_operand:SI 0 "register_operand")
4424 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
4425 (sign_extend:SI (match_operand:HI 2 "register_operand")))
4426 (match_operand:SI 3 "register_operand")))]
;; Widening multiply-subtract: operand 0 = op3 - (SI)op1 * (SI)op2, where
;; op1/op2 are sign-extended HImode values.  Condition not visible here.
4430 (define_expand "msubhisi4"
4431 [(set (match_operand:SI 0 "register_operand")
4433 (match_operand:SI 3 "register_operand")
4434 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
4435 (sign_extend:SI (match_operand:HI 2 "register_operand")))))]
4439 ;; String compare with length insn.
4440 ;; Argument 0 is the target (result)
4441 ;; Argument 1 is the source1
4442 ;; Argument 2 is the source2
4443 ;; Argument 3 is the length
4444 ;; Argument 4 is the alignment
;; Only attempted when the inline-strncmp tunable is on, not optimizing for
;; size, and one of Zbb/XTheadBb/V is available.  On success the word_mode
;; result is re-interpreted as a sign-promoted SImode lowpart subreg before
;; being copied to operand 0.
4446 (define_expand "cmpstrnsi"
4447 [(parallel [(set (match_operand:SI 0)
4448 (compare:SI (match_operand:BLK 1)
4449 (match_operand:BLK 2)))
4450 (use (match_operand:SI 3))
4451 (use (match_operand:SI 4))])]
4452 "riscv_inline_strncmp && !optimize_size
4453 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
4455 rtx temp = gen_reg_rtx (word_mode);
4456 if (riscv_expand_strcmp (temp, operands[1], operands[2],
4457 operands[3], operands[4]))
/* Mark the lowpart as a sign-promoted subreg of the word_mode result.  */
4461 temp = gen_lowpart (SImode, temp);
4462 SUBREG_PROMOTED_VAR_P (temp) = 1;
4463 SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
4465 emit_move_insn (operands[0], temp);
4472 ;; String compare insn.
4473 ;; Argument 0 is the target (result)
4474 ;; Argument 1 is the source1
4475 ;; Argument 2 is the source2
4476 ;; Argument 3 is the alignment
;; Same scheme as cmpstrnsi above, but with no length bound: the length
;; argument to riscv_expand_strcmp is NULL_RTX.
4478 (define_expand "cmpstrsi"
4479 [(parallel [(set (match_operand:SI 0)
4480 (compare:SI (match_operand:BLK 1)
4481 (match_operand:BLK 2)))
4482 (use (match_operand:SI 3))])]
4483 "riscv_inline_strcmp && !optimize_size
4484 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
4486 rtx temp = gen_reg_rtx (word_mode);
4487 if (riscv_expand_strcmp (temp, operands[1], operands[2],
4488 NULL_RTX, operands[3]))
/* Mark the lowpart as a sign-promoted subreg of the word_mode result.  */
4492 temp = gen_lowpart (SImode, temp);
4493 SUBREG_PROMOTED_VAR_P (temp) = 1;
4494 SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
4496 emit_move_insn (operands[0], temp);
4503 ;; Search character in string (generalization of strlen).
4504 ;; Argument 0 is the resulting offset
4505 ;; Argument 1 is the string
4506 ;; Argument 2 is the search character
4507 ;; Argument 3 is the alignment
;; The search character is checked against const0_rtx; the non-NUL path
;; presumably FAILs (body truncated here), so only plain strlen is inlined.
4509 (define_expand "strlen<mode>"
4510 [(set (match_operand:X 0 "register_operand")
4511 (unspec:X [(match_operand:BLK 1 "general_operand")
4512 (match_operand:SI 2 "const_int_operand")
4513 (match_operand:SI 3 "const_int_operand")]
4515 "riscv_inline_strlen && !optimize_size
4516 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
4518 rtx search_char = operands[2];
4520 if (search_char != const0_rtx)
4523 if (riscv_expand_strlen (operands[0], operands[1], operands[2], operands[3]))
;; CM_LARGE code model (RV64 only): load a DImode value through a
;; PC-relative symbolic address.  Length 8, i.e. a two-instruction sequence
;; (presumably auipc + ld; the output template is not visible here).
4529 (define_insn "*large_load_address"
4530 [(set (match_operand:DI 0 "register_operand" "=r")
4531 (mem:DI (match_operand 1 "pcrel_symbol_operand" "")))]
4532 "TARGET_64BIT && riscv_cmodel == CM_LARGE"
4534 [(set_attr "type" "load")
4535 (set (attr "length") (const_int 8))])
4537 ;; The AND is redundant here. It always turns off the high 32 bits and the
4538 ;; low number of bits equal to the shift count. Those upper 32 bits will be
4539 ;; reset by the SIGN_EXTEND at the end.
4541 ;; One could argue combine should have realized this and simplified what it
4542 ;; presented to the backend. But we can obviously cope with what it gave us.
;; Matches sext(lowpart(((x << C1) & C2)) + y) where the AND mask is
;; redundant (see the comment above).  After reload, splits into a DImode
;; shift into scratch 5 followed by a sign-extended SImode add of its
;; lowpart (operand 6) with operand 4.
4543 (define_insn_and_split ""
4544 [(set (match_operand:DI 0 "register_operand" "=r")
4548 (ashift:DI (match_operand:DI 1 "register_operand" "r")
4549 (match_operand 2 "const_int_operand" "n"))
4550 (match_operand 3 "const_int_operand" "n")) 0)
4551 (match_operand:SI 4 "register_operand" "r"))))
4552 (clobber (match_scratch:DI 5 "=&r"))]
4554 && (INTVAL (operands[3]) | ((1 << INTVAL (operands[2])) - 1)) == 0xffffffff"
4556 "&& reload_completed"
4557 [(set (match_dup 5) (ashift:DI (match_dup 1) (match_dup 2)))
4558 (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 6) (match_dup 4))))]
4559 "{ operands[6] = gen_lowpart (SImode, operands[5]); }"
4560 [(set_attr "type" "arith")])
;; Unsigned saturating add; all work is done by riscv_expand_usadd.
;; ANYI covers the integer modes; operands 1/2 may be immediates.
4562 (define_expand "usadd<mode>3"
4563 [(match_operand:ANYI 0 "register_operand")
4564 (match_operand:ANYI 1 "reg_or_int_operand")
4565 (match_operand:ANYI 2 "reg_or_int_operand")]
4568 riscv_expand_usadd (operands[0], operands[1], operands[2]);
;; Signed saturating add; all work is done by riscv_expand_ssadd.
;; Unlike usadd, both source operands must already be in registers.
4573 (define_expand "ssadd<mode>3"
4574 [(match_operand:ANYI 0 "register_operand")
4575 (match_operand:ANYI 1 "register_operand")
4576 (match_operand:ANYI 2 "register_operand")]
4579 riscv_expand_ssadd (operands[0], operands[1], operands[2]);
;; Unsigned saturating subtract; all work is done by riscv_expand_ussub.
4584 (define_expand "ussub<mode>3"
4585 [(match_operand:ANYI 0 "register_operand")
4586 (match_operand:ANYI 1 "reg_or_int_operand")
4587 (match_operand:ANYI 2 "reg_or_int_operand")]
4590 riscv_expand_ussub (operands[0], operands[1], operands[2]);
;; Signed saturating subtract; all work is done by riscv_expand_sssub.
4595 (define_expand "sssub<mode>3"
4596 [(match_operand:ANYI 0 "register_operand")
4597 (match_operand:ANYI 1 "register_operand")
4598 (match_operand:ANYI 2 "register_operand")]
4601 riscv_expand_sssub (operands[0], operands[1], operands[2]);
;; Unsigned saturating truncation to the half-width integer mode.
4606 (define_expand "ustrunc<mode><anyi_double_truncated>2"
4607 [(match_operand:<ANYI_DOUBLE_TRUNCATED> 0 "register_operand")
4608 (match_operand:ANYI_DOUBLE_TRUNC 1 "register_operand")]
4611 riscv_expand_ustrunc (operands[0], operands[1]);
;; Signed saturating truncation to the half-width integer mode.
4616 (define_expand "sstrunc<mode><anyi_double_truncated>2"
4617 [(match_operand:<ANYI_DOUBLE_TRUNCATED> 0 "register_operand")
4618 (match_operand:ANYI_DOUBLE_TRUNC 1 "register_operand")]
4621 riscv_expand_sstrunc (operands[0], operands[1]);
;; Unsigned saturating truncation to the quarter-width integer mode.
4626 (define_expand "ustrunc<mode><anyi_quad_truncated>2"
4627 [(match_operand:<ANYI_QUAD_TRUNCATED> 0 "register_operand")
4628 (match_operand:ANYI_QUAD_TRUNC 1 "register_operand")]
4631 riscv_expand_ustrunc (operands[0], operands[1]);
;; Signed saturating truncation to the quarter-width integer mode.
4636 (define_expand "sstrunc<mode><anyi_quad_truncated>2"
4637 [(match_operand:<ANYI_QUAD_TRUNCATED> 0 "register_operand")
4638 (match_operand:ANYI_QUAD_TRUNC 1 "register_operand")]
4641 riscv_expand_sstrunc (operands[0], operands[1]);
;; Unsigned saturating truncation to the eighth-width integer mode.
4646 (define_expand "ustrunc<mode><anyi_oct_truncated>2"
4647 [(match_operand:<ANYI_OCT_TRUNCATED> 0 "register_operand")
4648 (match_operand:ANYI_OCT_TRUNC 1 "register_operand")]
4651 riscv_expand_ustrunc (operands[0], operands[1]);
;; Signed saturating truncation to the eighth-width integer mode.
4656 (define_expand "sstrunc<mode><anyi_oct_truncated>2"
4657 [(match_operand:<ANYI_OCT_TRUNCATED> 0 "register_operand")
4658 (match_operand:ANYI_OCT_TRUNC 1 "register_operand")]
4661 riscv_expand_sstrunc (operands[0], operands[1]);
4666 ;; These are forms of (x << C1) + C2, potentially canonicalized from
4667 ;; ((x + C2') << C1). Depending on the cost to load C2 vs C2' we may
4668 ;; want to go ahead and recognize this form as C2 may be cheaper to
4669 ;; synthesize than C2'.
4671 ;; It might be better to refactor riscv_const_insns a bit so that we
4672 ;; can have an API that passes integer values around rather than
4673 ;; constructing a lot of garbage RTL.
4675 ;; The mvconst_internal pattern in effect requires this pattern to
4676 ;; also be a define_insn_and_split due to insn count costing when
4677 ;; splitting in combine.
;; (x << C1) + C2 in DImode, only when C2 can be synthesized in a single
;; instruction (riscv_const_insns == 1).  After reload: shift into the
;; destination, load C2 into scratch 4, then add.
4678 (define_insn_and_split ""
4679 [(set (match_operand:DI 0 "register_operand" "=r")
4680 (plus:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
4681 (match_operand 2 "const_int_operand" "n"))
4682 (match_operand 3 "const_int_operand" "n")))
4683 (clobber (match_scratch:DI 4 "=&r"))]
4684 "(TARGET_64BIT && riscv_const_insns (operands[3], false) == 1)"
4686 "&& reload_completed"
4687 [(set (match_dup 0) (ashift:DI (match_dup 1) (match_dup 2)))
4688 (set (match_dup 4) (match_dup 3))
4689 (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
4691 [(set_attr "type" "arith")])
;; Sign-extended SImode variant of the pattern above:
;; sext((x << C1) + C2) with a single-insn C2.  After reload: DImode shift
;; into the destination, load C2 into scratch 4, then a sign-extended
;; SImode add of the two lowparts (operands 5 and 6).
4693 (define_insn_and_split ""
4694 [(set (match_operand:DI 0 "register_operand" "=r")
4695 (sign_extend:DI (plus:SI (ashift:SI
4696 (match_operand:SI 1 "register_operand" "r")
4697 (match_operand 2 "const_int_operand" "n"))
4698 (match_operand 3 "const_int_operand" "n"))))
4699 (clobber (match_scratch:DI 4 "=&r"))]
4700 "(TARGET_64BIT && riscv_const_insns (operands[3], false) == 1)"
4702 "&& reload_completed"
4703 [(set (match_dup 0) (ashift:DI (match_dup 1) (match_dup 2)))
4704 (set (match_dup 4) (match_dup 3))
4705 (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 5) (match_dup 6))))]
4707 operands[1] = gen_lowpart (DImode, operands[1]);
4708 operands[5] = gen_lowpart (SImode, operands[0]);
4709 operands[6] = gen_lowpart (SImode, operands[4]);
4711 [(set_attr "type" "arith")])
;; Zicfiss shadow-stack push of operand 0, which the predicate restricts to
;; x1 (ra) or x5 (t0) as the ISA requires.  Template not visible here.
4715 (define_insn "@sspush<mode>"
4716 [(unspec_volatile [(match_operand:P 0 "x1x5_operand" "r")] UNSPECV_SSPUSH)]
4719 [(set_attr "type" "arith")
4720 (set_attr "mode" "<MODE>")])
;; Zicfiss shadow-stack pop-and-check against operand 0 (x1 or x5 only).
;; Template not visible here.
4722 (define_insn "@sspopchk<mode>"
4723 [(unspec_volatile [(match_operand:P 0 "x1x5_operand" "r")] UNSPECV_SSPOPCHK)]
4726 [(set_attr "type" "arith")
4727 (set_attr "mode" "<MODE>")])
;; Read the shadow-stack pointer into operand 0 (Zicfiss "ssrdp",
;; presumably — template not visible here).
4729 (define_insn "@ssrdp<mode>"
4730 [(set (match_operand:P 0 "register_operand" "=r")
4731 (unspec_volatile [(const_int 0)] UNSPECV_SSRDP))]
4734 [(set_attr "type" "arith")
4735 (set_attr "mode" "<MODE>")])
;; Write operand 0 to the shadow-stack pointer (used by the nonlocal-goto
;; shadow-stack adjustment code earlier in this file).  Template not
;; visible here.
4737 (define_insn "@write_ssp<mode>"
4738 [(unspec_volatile [(match_operand:P 0 "register_operand" "r")] UNSPECV_SSP)]
4741 [(set_attr "type" "arith")
4742 (set_attr "mode" "<MODE>")])
;; Landing-pad marker (Zicfilp); operand 0 is the label immediate.  Encoded
;; as an auipc-class insn per the type attribute.
;; NOTE(review): the define_insn header line is missing from this extract.
4747 [(unspec_volatile [(match_operand 0 "immediate_operand" "i")] UNSPECV_LPAD)]
4750 [(set_attr "type" "auipc")])
;; Load the landing-pad label immediate (operand 0) into hard register t2
;; (T2_REGNUM), for checking at the indirect-call target's lpad.
4752 (define_insn "@set_lpl<mode>"
4753 [(set (reg:GPR T2_REGNUM)
4754 (unspec_volatile [(match_operand:GPR 0 "immediate_operand" "i")] UNSPECV_SETLPL))]
4757 [(set_attr "type" "const")
4758 (set_attr "mode" "<MODE>")])
;; Alignment padding emitted before a landing pad (type "nop"); output
;; template not visible here.
4760 (define_insn "lpad_align"
4761 [(unspec_volatile [(const_int 0)] UNSPECV_LPAD_ALIGN)]
4764 [(set_attr "type" "nop")])
;; Copy operand 0 into hard register t2 (T2_REGNUM), marking it as the
;; guarded target for landing-pad checking (type "move").
4766 (define_insn "@set_guarded<mode>"
4767 [(set (reg:GPR T2_REGNUM)
4768 (unspec_volatile [(match_operand:GPR 0 "register_operand" "r")] UNSPECV_SET_GUARDED))]
4771 [(set_attr "type" "move")
4772 (set_attr "mode" "<MODE>")])
4774 (include "bitmanip.md")
4775 (include "crypto.md")
4777 (include "sync-rvwmo.md")
4778 (include "sync-ztso.md")
4779 (include "peephole.md")
4781 (include "generic.md")
4782 (include "sifive-7.md")
4783 (include "sifive-p400.md")
4784 (include "sifive-p600.md")
4785 (include "thead.md")
4786 (include "generic-vector-ooo.md")
4787 (include "generic-ooo.md")
4788 (include "vector.md")
4789 (include "vector-crypto.md")
4790 (include "vector-bfloat16.md")
4791 (include "zicond.md")
4794 (include "corev.md")
4795 (include "xiangshan.md")