/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
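
/* For example, on a host where HOST_WIDE_INT is 32 bits,
   HWI_SIGN_EXTEND (0x80000000) yields -1 and HWI_SIGN_EXTEND (0x7fffffff)
   yields 0, so the (low, high) pair reads as the same signed value that
   LOW would have as a signed wide int.  */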
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
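
/* For example, with 32-bit SImode, a constant whose only set bit within the
   mode is bit 31 (the value 0x80000000) satisfies mode_signbit_p, while any
   other value does not.  */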
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
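
/* As a usage sketch: for an integer mode such as SImode,
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds down to X itself
   via simplify_binary_operation, while a request that cannot be folded
   simply comes back as the plain rtx built by gen_rtx_fmt_ee.  */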
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
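
/* For example, replacing (reg:SI 60) with (const_int 4) in
   (plus:SI (reg:SI 60) (const_int -4)) rebuilds the PLUS recursively and
   lets simplify_gen_binary fold the whole expression down to (const_int 0).  */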
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
          && COMPARISON_P (op)
          && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
      break;
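
      /* As a worked example of the (not (plus X -1)) rule above: with X = 5,
         (plus X -1) is 4 and (not 4) is -5 in two's complement, which is
         exactly (neg X).  */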
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));
      break;
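
      /* For instance, in SImode (lshiftrt X 31) is 0 or 1 according to the
         sign bit of X, so its negation is 0 or -1, which is exactly what
         (ashiftrt X 31) computes directly.  */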
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                        (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      return gen_int_mode (val, mode);
    }
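
  /* For example, (ffs (const_int 12)) folds to 3 here: the lowest set bit
     of 12 is 4, and exact_log2 (4) + 1 is 3.  (ffs (const_int 0)) folds
     to 0, as the comment in the FFS case notes.  */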
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
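
/* For example, with CODE == PLUS the canonicalization above turns
   (plus (plus x (const_int 3)) y) into (plus (plus x y) (const_int 3)),
   so that constants accumulate at the top of the expression where later
   folding can combine them.  */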
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
1242 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1243 when x is NaN, infinite, or finite and nonzero. They aren't
1244 when x is -0 and the rounding mode is not towards -infinity,
1245 since (-0) + 0 is then 0. */
1246 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1249 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1250 transformations are safe even for IEEE. */
1251 if (GET_CODE (op0
) == NEG
)
1252 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1253 else if (GET_CODE (op1
) == NEG
)
1254 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1256 /* (~a) + 1 -> -a */
1257 if (INTEGRAL_MODE_P (mode
)
1258 && GET_CODE (op0
) == NOT
1259 && trueop1
== const1_rtx
)
1260 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1262 /* Handle both-operands-constant cases. We can only add
1263 CONST_INTs to constants since the sum of relocatable symbols
1264 can't be handled by most assemblers. Don't add CONST_INT
1265 to CONST_INT since overflow won't be computed properly if wider
1266 than HOST_BITS_PER_WIDE_INT. */
1268 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1269 && GET_CODE (op1
) == CONST_INT
)
1270 return plus_constant (op0
, INTVAL (op1
));
1271 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1272 && GET_CODE (op0
) == CONST_INT
)
1273 return plus_constant (op1
, INTVAL (op0
));
1275 /* See if this is something like X * C - X or vice versa or
1276 if the multiplication is written as a shift. If so, we can
1277 distribute and make a new multiply, shift, or maybe just
1278 have X (if C is 2 in the example above). But don't make
1279 something more expensive than we had before. */
1281 if (SCALAR_INT_MODE_P (mode
))
1283 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1284 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1285 rtx lhs
= op0
, rhs
= op1
;
1287 if (GET_CODE (lhs
) == NEG
)
1291 lhs
= XEXP (lhs
, 0);
1293 else if (GET_CODE (lhs
) == MULT
1294 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1296 coeff0l
= INTVAL (XEXP (lhs
, 1));
1297 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1298 lhs
= XEXP (lhs
, 0);
1300 else if (GET_CODE (lhs
) == ASHIFT
1301 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1302 && INTVAL (XEXP (lhs
, 1)) >= 0
1303 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1305 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1307 lhs
= XEXP (lhs
, 0);
1310 if (GET_CODE (rhs
) == NEG
)
1314 rhs
= XEXP (rhs
, 0);
1316 else if (GET_CODE (rhs
) == MULT
1317 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1319 coeff1l
= INTVAL (XEXP (rhs
, 1));
1320 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1321 rhs
= XEXP (rhs
, 0);
1323 else if (GET_CODE (rhs
) == ASHIFT
1324 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1325 && INTVAL (XEXP (rhs
, 1)) >= 0
1326 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1328 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1330 rhs
= XEXP (rhs
, 0);
1333 if (rtx_equal_p (lhs
, rhs
))
1335 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1337 unsigned HOST_WIDE_INT l
;
1340 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1341 coeff
= immed_double_const (l
, h
, mode
);
1343 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1344 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1349 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1350 if ((GET_CODE (op1
) == CONST_INT
1351 || GET_CODE (op1
) == CONST_DOUBLE
)
1352 && GET_CODE (op0
) == XOR
1353 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1354 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1355 && mode_signbit_p (mode
, op1
))
1356 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1357 simplify_gen_binary (XOR
, mode
, op1
,
1360 /* If one of the operands is a PLUS or a MINUS, see if we can
1361 simplify this by the associative law.
1362 Don't use the associative law for floating point.
1363 The inaccuracy makes it nonassociative,
1364 and subtle programs can break if operations are associated. */
1366 if (INTEGRAL_MODE_P (mode
)
1367 && (plus_minus_operand_p (op0
)
1368 || plus_minus_operand_p (op1
))
1369 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1372 /* Reassociate floating point addition only when the user
1373 specifies unsafe math optimizations. */
1374 if (FLOAT_MODE_P (mode
)
1375 && flag_unsafe_math_optimizations
)
1377 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1385 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1386 using cc0, in which case we want to leave it as a COMPARE
1387 so we can distinguish it from a register-register-copy.
1389 In IEEE floating point, x-0 is not the same as x. */
1391 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1392 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1393 && trueop1
== CONST0_RTX (mode
))
1397 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1398 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1399 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1400 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1402 rtx xop00
= XEXP (op0
, 0);
1403 rtx xop10
= XEXP (op1
, 0);
1406 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1408 if (REG_P (xop00
) && REG_P (xop10
)
1409 && GET_MODE (xop00
) == GET_MODE (xop10
)
1410 && REGNO (xop00
) == REGNO (xop10
)
1411 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1412 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1419 /* We can't assume x-x is 0 even with non-IEEE floating point,
1420 but since it is zero except in very strange circumstances, we
1421 will treat it as zero with -funsafe-math-optimizations. */
1422 if (rtx_equal_p (trueop0
, trueop1
)
1423 && ! side_effects_p (op0
)
1424 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1425 return CONST0_RTX (mode
);
1427 /* Change subtraction from zero into negation. (0 - x) is the
1428 same as -x when x is NaN, infinite, or finite and nonzero.
1429 But if the mode has signed zeros, and does not round towards
1430 -infinity, then 0 - 0 is 0, not -0. */
1431 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1432 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1434 /* (-1 - a) is ~a. */
1435 if (trueop0
== constm1_rtx
)
1436 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1438 /* Subtracting 0 has no effect unless the mode has signed zeros
1439 and supports rounding towards -infinity. In such a case,
1441 if (!(HONOR_SIGNED_ZEROS (mode
)
1442 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1443 && trueop1
== CONST0_RTX (mode
))
1446 /* See if this is something like X * C - X or vice versa or
1447 if the multiplication is written as a shift. If so, we can
1448 distribute and make a new multiply, shift, or maybe just
1449 have X (if C is 2 in the example above). But don't make
1450 something more expensive than we had before. */
1452 if (SCALAR_INT_MODE_P (mode
))
1454 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1455 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1456 rtx lhs
= op0
, rhs
= op1
;
1458 if (GET_CODE (lhs
) == NEG
)
1462 lhs
= XEXP (lhs
, 0);
1464 else if (GET_CODE (lhs
) == MULT
1465 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1467 coeff0l
= INTVAL (XEXP (lhs
, 1));
1468 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1469 lhs
= XEXP (lhs
, 0);
1471 else if (GET_CODE (lhs
) == ASHIFT
1472 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1473 && INTVAL (XEXP (lhs
, 1)) >= 0
1474 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1476 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1478 lhs
= XEXP (lhs
, 0);
1481 if (GET_CODE (rhs
) == NEG
)
1485 rhs
= XEXP (rhs
, 0);
1487 else if (GET_CODE (rhs
) == MULT
1488 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1490 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1491 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1492 rhs
= XEXP (rhs
, 0);
1494 else if (GET_CODE (rhs
) == ASHIFT
1495 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1496 && INTVAL (XEXP (rhs
, 1)) >= 0
1497 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1499 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1501 rhs
= XEXP (rhs
, 0);
1504 if (rtx_equal_p (lhs
, rhs
))
1506 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1508 unsigned HOST_WIDE_INT l
;
1511 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1512 coeff
= immed_double_const (l
, h
, mode
);
1514 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1515 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1520 /* (a - (-b)) -> (a + b). True even for IEEE. */
1521 if (GET_CODE (op1
) == NEG
)
1522 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1524 /* (-x - c) may be simplified as (-c - x). */
1525 if (GET_CODE (op0
) == NEG
1526 && (GET_CODE (op1
) == CONST_INT
1527 || GET_CODE (op1
) == CONST_DOUBLE
))
1529 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1531 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1534 /* If one of the operands is a PLUS or a MINUS, see if we can
1535 simplify this by the associative law.
1536 Don't use the associative law for floating point.
1537 The inaccuracy makes it nonassociative,
1538 and subtle programs can break if operations are associated. */
1540 if (INTEGRAL_MODE_P (mode
)
1541 && (plus_minus_operand_p (op0
)
1542 || plus_minus_operand_p (op1
))
1543 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1546 /* Don't let a relocatable value get a negative coeff. */
1547 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1548 return simplify_gen_binary (PLUS
, mode
,
1550 neg_const_int (mode
, op1
));
1552 /* (x - (x & y)) -> (x & ~y) */
1553 if (GET_CODE (op1
) == AND
)
1555 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1557 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1558 GET_MODE (XEXP (op1
, 1)));
1559 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1561 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1563 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1564 GET_MODE (XEXP (op1
, 0)));
1565 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1571 if (trueop1
== constm1_rtx
)
1572 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1574 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1575 x is NaN, since x * 0 is then also NaN. Nor is it valid
1576 when the mode has signed zeros, since multiplying a negative
1577 number by 0 will give -0, not 0. */
1578 if (!HONOR_NANS (mode
)
1579 && !HONOR_SIGNED_ZEROS (mode
)
1580 && trueop1
== CONST0_RTX (mode
)
1581 && ! side_effects_p (op0
))
1584 /* In IEEE floating point, x*1 is not equivalent to x for
1586 if (!HONOR_SNANS (mode
)
1587 && trueop1
== CONST1_RTX (mode
))
1590 /* Convert multiply by constant power of two into shift unless
1591 we are still generating RTL. This test is a kludge. */
1592 if (GET_CODE (trueop1
) == CONST_INT
1593 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1594 /* If the mode is larger than the host word size, and the
1595 uppermost bit is set, then this isn't a power of two due
1596 to implicit sign extension. */
1597 && (width
<= HOST_BITS_PER_WIDE_INT
1598 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1599 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1601 /* Likewise for multipliers wider than a word. */
1602 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1603 && (GET_MODE (trueop1
) == VOIDmode
1604 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1605 && GET_MODE (op0
) == mode
1606 && CONST_DOUBLE_LOW (trueop1
) == 0
1607 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
1608 return simplify_gen_binary (ASHIFT
, mode
, op0
,
1609 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
1611 /* x*2 is x+x and x*(-1) is -x */
1612 if (GET_CODE (trueop1
) == CONST_DOUBLE
1613 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1614 && GET_MODE (op0
) == mode
)
1617 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1619 if (REAL_VALUES_EQUAL (d
, dconst2
))
1620 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1622 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1623 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1626 /* Reassociate multiplication, but for floating point MULTs
1627 only when the user specifies unsafe math optimizations. */
1628 if (! FLOAT_MODE_P (mode
)
1629 || flag_unsafe_math_optimizations
)
1631 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1638 if (trueop1
== const0_rtx
)
1640 if (GET_CODE (trueop1
) == CONST_INT
1641 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1642 == GET_MODE_MASK (mode
)))
1644 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1646 /* A | (~A) -> -1 */
1647 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1648 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1649 && ! side_effects_p (op0
)
1650 && SCALAR_INT_MODE_P (mode
))
1652 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1658 if (trueop1
== const0_rtx
)
1660 if (GET_CODE (trueop1
) == CONST_INT
1661 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1662 == GET_MODE_MASK (mode
)))
1663 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1664 if (rtx_equal_p (trueop0
, trueop1
)
1665 && ! side_effects_p (op0
)
1666 && GET_MODE_CLASS (mode
) != MODE_CC
)
1667 return CONST0_RTX (mode
);
1669 /* Canonicalize XOR of the most significant bit to PLUS. */
1670 if ((GET_CODE (op1
) == CONST_INT
1671 || GET_CODE (op1
) == CONST_DOUBLE
)
1672 && mode_signbit_p (mode
, op1
))
1673 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
1674 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1675 if ((GET_CODE (op1
) == CONST_INT
1676 || GET_CODE (op1
) == CONST_DOUBLE
)
1677 && GET_CODE (op0
) == PLUS
1678 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1679 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1680 && mode_signbit_p (mode
, XEXP (op0
, 1)))
1681 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1682 simplify_gen_binary (XOR
, mode
, op1
,
1685 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1691 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
1693 /* If we are turning off bits already known off in OP0, we need
1695 if (GET_CODE (trueop1
) == CONST_INT
1696 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1697 && (nonzero_bits (trueop0
, mode
) & ~INTVAL (trueop1
)) == 0)
1699 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
1700 && GET_MODE_CLASS (mode
) != MODE_CC
)
1703 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1704 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1705 && ! side_effects_p (op0
)
1706 && GET_MODE_CLASS (mode
) != MODE_CC
)
1707 return CONST0_RTX (mode
);
1709 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1710 there are no nonzero bits of C outside of X's mode. */
1711 if ((GET_CODE (op0
) == SIGN_EXTEND
1712 || GET_CODE (op0
) == ZERO_EXTEND
)
1713 && GET_CODE (trueop1
) == CONST_INT
1714 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1715 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
1716 & INTVAL (trueop1
)) == 0)
1718 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
1719 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
1720 gen_int_mode (INTVAL (trueop1
),
1722 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
1725 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1726 ((A & N) + B) & M -> (A + B) & M
1727 Similarly if (N & M) == 0,
1728 ((A | N) + B) & M -> (A + B) & M
1729 and for - instead of + and/or ^ instead of |. */
1730 if (GET_CODE (trueop1
) == CONST_INT
1731 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1732 && ~INTVAL (trueop1
)
1733 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
1734 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
1739 pmop
[0] = XEXP (op0
, 0);
1740 pmop
[1] = XEXP (op0
, 1);
1742 for (which
= 0; which
< 2; which
++)
1745 switch (GET_CODE (tem
))
1748 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
1749 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
1750 == INTVAL (trueop1
))
1751 pmop
[which
] = XEXP (tem
, 0);
1755 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
1756 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
1757 pmop
[which
] = XEXP (tem
, 0);
1764 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
1766 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
1768 return simplify_gen_binary (code
, mode
, tem
, op1
);
1771 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1777 /* 0/x is 0 (or x&0 if x has side-effects). */
1778 if (trueop0
== CONST0_RTX (mode
))
1780 if (side_effects_p (op1
))
1781 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1785 if (trueop1
== CONST1_RTX (mode
))
1786 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
1787 /* Convert divide by power of two into shift. */
1788 if (GET_CODE (trueop1
) == CONST_INT
1789 && (val
= exact_log2 (INTVAL (trueop1
))) > 0)
1790 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
1794 /* Handle floating point and integers separately. */
1795 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1797 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1798 safe for modes with NaNs, since 0.0 / 0.0 will then be
1799 NaN rather than 0.0. Nor is it safe for modes with signed
1800 zeros, since dividing 0 by a negative number gives -0.0 */
1801 if (trueop0
== CONST0_RTX (mode
)
1802 && !HONOR_NANS (mode
)
1803 && !HONOR_SIGNED_ZEROS (mode
)
1804 && ! side_effects_p (op1
))
1807 if (trueop1
== CONST1_RTX (mode
)
1808 && !HONOR_SNANS (mode
))
1811 if (GET_CODE (trueop1
) == CONST_DOUBLE
1812 && trueop1
!= CONST0_RTX (mode
))
1815 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1818 if (REAL_VALUES_EQUAL (d
, dconstm1
)
1819 && !HONOR_SNANS (mode
))
1820 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1822 /* Change FP division by a constant into multiplication.
1823 Only do this with -funsafe-math-optimizations. */
1824 if (flag_unsafe_math_optimizations
1825 && !REAL_VALUES_EQUAL (d
, dconst0
))
1827 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
1828 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1829 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
1835 /* 0/x is 0 (or x&0 if x has side-effects). */
1836 if (trueop0
== CONST0_RTX (mode
))
1838 if (side_effects_p (op1
))
1839 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1843 if (trueop1
== CONST1_RTX (mode
))
1844 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
1846 if (trueop1
== constm1_rtx
)
1848 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
1849 return simplify_gen_unary (NEG
, mode
, x
, mode
);
1855 /* 0%x is 0 (or x&0 if x has side-effects). */
1856 if (trueop0
== CONST0_RTX (mode
))
1858 if (side_effects_p (op1
))
1859 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1862 /* x%1 is 0 (of x&0 if x has side-effects). */
1863 if (trueop1
== CONST1_RTX (mode
))
1865 if (side_effects_p (op0
))
1866 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
1867 return CONST0_RTX (mode
);
1869 /* Implement modulus by power of two as AND. */
1870 if (GET_CODE (trueop1
) == CONST_INT
1871 && exact_log2 (INTVAL (trueop1
)) > 0)
1872 return simplify_gen_binary (AND
, mode
, op0
,
1873 GEN_INT (INTVAL (op1
) - 1));
1877 /* 0%x is 0 (or x&0 if x has side-effects). */
1878 if (trueop0
== CONST0_RTX (mode
))
1880 if (side_effects_p (op1
))
1881 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1884 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1885 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
1887 if (side_effects_p (op0
))
1888 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
1889 return CONST0_RTX (mode
);
1896 /* Rotating ~0 always results in ~0. */
1897 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1898 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1899 && ! side_effects_p (op1
))
1902 /* Fall through.... */
1906 if (trueop1
== CONST0_RTX (mode
))
1908 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
1913 if (width
<= HOST_BITS_PER_WIDE_INT
1914 && GET_CODE (trueop1
) == CONST_INT
1915 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1916 && ! side_effects_p (op0
))
1918 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1920 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1926 if (width
<= HOST_BITS_PER_WIDE_INT
1927 && GET_CODE (trueop1
) == CONST_INT
1928 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1929 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1930 && ! side_effects_p (op0
))
1932 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1934 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1940 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
1942 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1944 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1950 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1952 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1954 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1963 /* ??? There are simplifications that can be done. */
1967 if (!VECTOR_MODE_P (mode
))
1969 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
1970 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
1971 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
1972 gcc_assert (XVECLEN (trueop1
, 0) == 1);
1973 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
1975 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1976 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
1981 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
1982 gcc_assert (GET_MODE_INNER (mode
)
1983 == GET_MODE_INNER (GET_MODE (trueop0
)));
1984 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
1986 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1988 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1989 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1990 rtvec v
= rtvec_alloc (n_elts
);
1993 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
1994 for (i
= 0; i
< n_elts
; i
++)
1996 rtx x
= XVECEXP (trueop1
, 0, i
);
1998 gcc_assert (GET_CODE (x
) == CONST_INT
);
1999 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2003 return gen_rtx_CONST_VECTOR (mode
, v
);
2009 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2010 ? GET_MODE (trueop0
)
2011 : GET_MODE_INNER (mode
));
2012 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2013 ? GET_MODE (trueop1
)
2014 : GET_MODE_INNER (mode
));
2016 gcc_assert (VECTOR_MODE_P (mode
));
2017 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2018 == GET_MODE_SIZE (mode
));
2020 if (VECTOR_MODE_P (op0_mode
))
2021 gcc_assert (GET_MODE_INNER (mode
)
2022 == GET_MODE_INNER (op0_mode
));
2024 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2026 if (VECTOR_MODE_P (op1_mode
))
2027 gcc_assert (GET_MODE_INNER (mode
)
2028 == GET_MODE_INNER (op1_mode
));
2030 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2032 if ((GET_CODE (trueop0
) == CONST_VECTOR
2033 || GET_CODE (trueop0
) == CONST_INT
2034 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2035 && (GET_CODE (trueop1
) == CONST_VECTOR
2036 || GET_CODE (trueop1
) == CONST_INT
2037 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2039 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2040 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2041 rtvec v
= rtvec_alloc (n_elts
);
2043 unsigned in_n_elts
= 1;
2045 if (VECTOR_MODE_P (op0_mode
))
2046 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2047 for (i
= 0; i
< n_elts
; i
++)
2051 if (!VECTOR_MODE_P (op0_mode
))
2052 RTVEC_ELT (v
, i
) = trueop0
;
2054 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2058 if (!VECTOR_MODE_P (op1_mode
))
2059 RTVEC_ELT (v
, i
) = trueop1
;
2061 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2066 return gen_rtx_CONST_VECTOR (mode
, v
);
2079 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2082 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2084 unsigned int width
= GET_MODE_BITSIZE (mode
);
2086 if (VECTOR_MODE_P (mode
)
2087 && code
!= VEC_CONCAT
2088 && GET_CODE (op0
) == CONST_VECTOR
2089 && GET_CODE (op1
) == CONST_VECTOR
)
2091 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2092 enum machine_mode op0mode
= GET_MODE (op0
);
2093 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2094 enum machine_mode op1mode
= GET_MODE (op1
);
2095 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2096 rtvec v
= rtvec_alloc (n_elts
);
2099 gcc_assert (op0_n_elts
== n_elts
);
2100 gcc_assert (op1_n_elts
== n_elts
);
2101 for (i
= 0; i
< n_elts
; i
++)
2103 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2104 CONST_VECTOR_ELT (op0
, i
),
2105 CONST_VECTOR_ELT (op1
, i
));
2108 RTVEC_ELT (v
, i
) = x
;
2111 return gen_rtx_CONST_VECTOR (mode
, v
);
2114 if (VECTOR_MODE_P (mode
)
2115 && code
== VEC_CONCAT
2116 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2118 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2119 rtvec v
= rtvec_alloc (n_elts
);
2121 gcc_assert (n_elts
>= 2);
2124 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2125 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2127 RTVEC_ELT (v
, 0) = op0
;
2128 RTVEC_ELT (v
, 1) = op1
;
2132 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2133 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2136 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2137 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2138 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2140 for (i
= 0; i
< op0_n_elts
; ++i
)
2141 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2142 for (i
= 0; i
< op1_n_elts
; ++i
)
2143 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2146 return gen_rtx_CONST_VECTOR (mode
, v
);
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;
	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
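      /* Hedged illustration (an assumption, not text from the original
	 sources): with width == 8 and op0 == (const_int 0xff), the
	 zero-extended form is arg0 == 255 while the sign-extended form
	 becomes arg0s == 0xff | ((HOST_WIDE_INT) -1 << 8) == -1.  */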
      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}
      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;
	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
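	  /* Hedged example (an assumption, not from the original file): on a
	     target with SHIFT_COUNT_TRUNCATED, an SImode shift count of 33
	     folds as 33 % 32 == 1; without it, the out-of-range count makes
	     this function refuse to fold and return 0 instead.  */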
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
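/* Illustrative sketch (an assumption about typical behavior, not text from
   the original sources): simplifying (minus (plus A B) (minus C D)) first
   flattens the expression into the ops[] array as
       { {A, +}, {B, +}, {C, -}, {D, +} },
   then repeatedly tries simplify_binary_operation on pairs of operands, and
   finally rebuilds a chain of PLUS/MINUS rtxes from whatever remains.  */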
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
  short ix;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;
  int result;

  result = (commutative_operand_precedence (d2->op)
	    - commutative_operand_precedence (d1->op));
  if (result)
    return result;
  return d1->ix - d2->ix;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
2639 for (i
= 0; i
< n_ops
; i
++)
2641 rtx this_op
= ops
[i
].op
;
2642 int this_neg
= ops
[i
].neg
;
2643 enum rtx_code this_code
= GET_CODE (this_op
);
2652 ops
[n_ops
].op
= XEXP (this_op
, 1);
2653 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2656 ops
[i
].op
= XEXP (this_op
, 0);
2662 ops
[i
].op
= XEXP (this_op
, 0);
2663 ops
[i
].neg
= ! this_neg
;
2669 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2670 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2671 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2673 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2674 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2675 ops
[n_ops
].neg
= this_neg
;
2683 /* ~a -> (-a - 1) */
2686 ops
[n_ops
].op
= constm1_rtx
;
2687 ops
[n_ops
++].neg
= this_neg
;
2688 ops
[i
].op
= XEXP (this_op
, 0);
2689 ops
[i
].neg
= !this_neg
;
2697 ops
[i
].op
= neg_const_int (mode
, this_op
);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */
2727 for (i
= 0; i
< n_ops
- 1; i
++)
2728 for (j
= i
+ 1; j
< n_ops
; j
++)
2730 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2731 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2733 if (lhs
!= 0 && rhs
!= 0
2734 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2736 enum rtx_code ncode
= PLUS
;
2742 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2744 else if (swap_commutative_operands_p (lhs
, rhs
))
2745 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2747 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2749 /* Reject "simplifications" that just wrap the two
2750 arguments in a CONST. Failure to do so can result
2751 in infinite recursion with simplify_binary_operation
2752 when it calls us to simplify CONST operations. */
2754 && ! (GET_CODE (tem
) == CONST
2755 && GET_CODE (XEXP (tem
, 0)) == ncode
2756 && XEXP (XEXP (tem
, 0), 0) == lhs
2757 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2758 /* Don't allow -x + -1 -> ~x simplifications in the
2759 first pass. This allows us the chance to combine
2760 the -1 with other constants. */
2762 && GET_CODE (tem
) == NOT
2763 && XEXP (tem
, 0) == rhs
))
2766 if (GET_CODE (tem
) == NEG
)
2767 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2768 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2769 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2773 ops
[j
].op
= NULL_RTX
;
2783 /* Pack all the operands to the lower-numbered entries. */
2784 for (i
= 0, j
= 0; j
< n_ops
; j
++)
2788 /* Stabilize sort. */
2794 /* Sort the operations based on swap_commutative_operands_p. */
2795 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
2797 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2799 && GET_CODE (ops
[1].op
) == CONST_INT
2800 && CONSTANT_P (ops
[0].op
)
2802 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
2804 /* We suppressed creation of trivial CONST expressions in the
2805 combination loop to avoid recursion. Create one manually now.
2806 The combination loop should have ensured that there is exactly
2807 one CONST_INT, and the sort will have ensured that it is last
2808 in the array and that any other constant will be next-to-last. */
2811 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2812 && CONSTANT_P (ops
[n_ops
- 2].op
))
2814 rtx value
= ops
[n_ops
- 1].op
;
2815 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2816 value
= neg_const_int (mode
, value
);
2817 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2821 /* Count the number of CONSTs that we generated. */
2823 for (i
= 0; i
< n_ops
; i
++)
2824 if (GET_CODE (ops
[i
].op
) == CONST
)
2827 /* Give up if we didn't reduce the number of operands we had. Make
2828 sure we count a CONST as two operands. If we have the same
2829 number of operands, but have made more CONSTs than before, this
2830 is also an improvement, so accept it. */
2832 && (n_ops
+ n_consts
> input_ops
2833 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
2836 /* Put a non-negated operand first, if possible. */
2838 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2841 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
2850 /* Now make the result by performing the requested operations. */
2852 for (i
= 1; i
< n_ops
; i
++)
2853 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2854 mode
, result
, ops
[i
].op
);
  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2893 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2895 if (tem
== const0_rtx
)
2896 return CONST0_RTX (mode
);
2897 #ifdef FLOAT_STORE_FLAG_VALUE
2899 REAL_VALUE_TYPE val
;
2900 val
= FLOAT_STORE_FLAG_VALUE (mode
);
2901 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
2907 if (VECTOR_MODE_P (mode
))
2909 if (tem
== const0_rtx
)
2910 return CONST0_RTX (mode
);
2911 #ifdef VECTOR_STORE_FLAG_VALUE
2916 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
2917 if (val
== NULL_RTX
)
2919 if (val
== const1_rtx
)
2920 return CONST1_RTX (mode
);
2922 units
= GET_MODE_NUNITS (mode
);
2923 v
= rtvec_alloc (units
);
2924 for (i
= 0; i
< units
; i
++)
2925 RTVEC_ELT (v
, i
) = val
;
2926 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);
2969 if (GET_CODE (op1
) == CONST_INT
)
2971 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
2973 /* If op0 is a comparison, extract the comparison arguments form it. */
2976 if (GET_MODE (op0
) == mode
)
2977 return simplify_rtx (op0
);
2979 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
2980 XEXP (op0
, 0), XEXP (op0
, 1));
2982 else if (code
== EQ
)
2984 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
2985 if (new_code
!= UNKNOWN
)
2986 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
2987 XEXP (op0
, 0), XEXP (op0
, 1));
2992 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2993 if ((code
== EQ
|| code
== NE
)
2994 && (op0code
== PLUS
|| op0code
== MINUS
)
2996 && CONSTANT_P (XEXP (op0
, 1))
2997 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
2999 rtx x
= XEXP (op0
, 0);
3000 rtx c
= XEXP (op0
, 1);
3002 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3004 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3007 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3008 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3010 && op1
== const0_rtx
3011 && GET_MODE_CLASS (mode
) == MODE_INT
3012 && cmp_mode
!= VOIDmode
3013 /* ??? Work-around BImode bugs in the ia64 backend. */
3015 && cmp_mode
!= BImode
3016 && nonzero_bits (op0
, cmp_mode
) == 1
3017 && STORE_FLAG_VALUE
== 1)
3018 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3019 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3020 : lowpart_subreg (mode
, op0
, cmp_mode
);
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */
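/* Hedged example (an assumption, not text from the original sources):
   called with code == LTU on (const_int 3) and (const_int 7) this returns
   const_true_rtx, with the operands swapped it returns const0_rtx, and for
   two distinct pseudo registers it returns 0 (no folding is possible).  */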
rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));
3044 /* If op0 is a compare, extract the comparison arguments from it. */
3045 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3047 op1
= XEXP (op0
, 1);
3048 op0
= XEXP (op0
, 0);
3050 if (GET_MODE (op0
) != VOIDmode
)
3051 mode
= GET_MODE (op0
);
3052 else if (GET_MODE (op1
) != VOIDmode
)
3053 mode
= GET_MODE (op1
);
3058 /* We can't simplify MODE_CC values since we don't know what the
3059 actual comparison is. */
3060 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3063 /* Make sure the constant is second. */
3064 if (swap_commutative_operands_p (op0
, op1
))
3066 tem
= op0
, op0
= op1
, op1
= tem
;
3067 code
= swap_condition (code
);
3070 trueop0
= avoid_constant_pool_reference (op0
);
3071 trueop1
= avoid_constant_pool_reference (op1
);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */
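  /* Hedged illustration (an assumption, not original text): for
     (eq (plus (reg X) (const_int 4)) (plus (reg X) (const_int 8))),
     folding the MINUS of the two operands yields (const_int -4); comparing
     that nonzero constant against zero then folds the whole comparison to
     const0_rtx.  */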
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
3093 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
3094 return const_true_rtx
;
3096 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3099 /* For modes without NaNs, if the two operands are equal, we know the
3100 result except if they have side-effects. */
3101 if (! HONOR_NANS (GET_MODE (trueop0
))
3102 && rtx_equal_p (trueop0
, trueop1
)
3103 && ! side_effects_p (trueop0
))
3104 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3106 /* If the operands are floating-point constants, see if we can fold
3108 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3109 && GET_CODE (trueop1
) == CONST_DOUBLE
3110 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
3112 REAL_VALUE_TYPE d0
, d1
;
3114 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3115 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3117 /* Comparisons are unordered iff at least one of the values is NaN. */
3118 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3128 return const_true_rtx
;
3141 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3142 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3143 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3146 /* Otherwise, see if the operands are both integers. */
3147 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3148 && (GET_CODE (trueop0
) == CONST_DOUBLE
3149 || GET_CODE (trueop0
) == CONST_INT
)
3150 && (GET_CODE (trueop1
) == CONST_DOUBLE
3151 || GET_CODE (trueop1
) == CONST_INT
))
3153 int width
= GET_MODE_BITSIZE (mode
);
3154 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3155 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3157 /* Get the two words comprising each integer constant. */
3158 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3160 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3161 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3165 l0u
= l0s
= INTVAL (trueop0
);
3166 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3169 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3171 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3172 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3176 l1u
= l1s
= INTVAL (trueop1
);
3177 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3180 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3181 we have to sign or zero-extend the values. */
3182 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3184 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3185 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3187 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3188 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3190 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3191 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3193 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3194 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3196 equal
= (h0u
== h1u
&& l0u
== l1u
);
3197 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3198 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3199 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3200 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
3203 /* Otherwise, there are some code-specific tests we can make. */
3206 /* Optimize comparisons with upper and lower bounds. */
3207 if (SCALAR_INT_MODE_P (mode
)
3208 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3221 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3228 /* x >= min is always true. */
3229 if (rtx_equal_p (trueop1
, mmin
))
3230 tem
= const_true_rtx
;
3236 /* x <= max is always true. */
3237 if (rtx_equal_p (trueop1
, mmax
))
3238 tem
= const_true_rtx
;
3243 /* x > max is always false. */
3244 if (rtx_equal_p (trueop1
, mmax
))
3250 /* x < min is always false. */
3251 if (rtx_equal_p (trueop1
, mmin
))
3258 if (tem
== const0_rtx
3259 || tem
== const_true_rtx
)
3266 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3271 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3272 return const_true_rtx
;
3276 /* Optimize abs(x) < 0.0. */
3277 if (trueop1
== CONST0_RTX (mode
)
3278 && !HONOR_SNANS (mode
)
3279 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3281 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3283 if (GET_CODE (tem
) == ABS
)
3289 /* Optimize abs(x) >= 0.0. */
3290 if (trueop1
== CONST0_RTX (mode
)
3291 && !HONOR_NANS (mode
)
3292 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3294 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3296 if (GET_CODE (tem
) == ABS
)
3297 return const_true_rtx
;
3302 /* Optimize ! (abs(x) < 0.0). */
3303 if (trueop1
== CONST0_RTX (mode
))
3305 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3307 if (GET_CODE (tem
) == ABS
)
3308 return const_true_rtx
;
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
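/* Hedged example (an assumption, not text from the original file):
   (if_then_else (const_int 1) A B) folds to A below, and
   (if_then_else C X X) folds to X when C has no side effects.  */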
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
3377 if (GET_CODE (op0
) == CONST_INT
3378 && GET_CODE (op1
) == CONST_INT
3379 && GET_CODE (op2
) == CONST_INT
3380 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
3381 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
3383 /* Extracting a bit-field from a constant */
3384 HOST_WIDE_INT val
= INTVAL (op0
);
3386 if (BITS_BIG_ENDIAN
)
3387 val
>>= (GET_MODE_BITSIZE (op0_mode
)
3388 - INTVAL (op2
) - INTVAL (op1
));
3390 val
>>= INTVAL (op2
);
3392 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
3394 /* First zero-extend. */
3395 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
3396 /* If desired, propagate sign bit. */
3397 if (code
== SIGN_EXTRACT
3398 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
3399 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
3402 /* Clear the bits that don't belong in our mode,
3403 unless they and our sign bit are all one.
3404 So we get either a reasonable negative value or a reasonable
3405 unsigned value for this mode. */
3406 if (width
< HOST_BITS_PER_WIDE_INT
3407 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
3408 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
3409 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3411 return gen_int_mode (val
, mode
);
3416 if (GET_CODE (op0
) == CONST_INT
)
3417 return op0
!= const0_rtx
? op1
: op2
;
3419 /* Convert c ? a : a into "a". */
3420 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3423 /* Convert a != b ? a : b into "a". */
3424 if (GET_CODE (op0
) == NE
3425 && ! side_effects_p (op0
)
3426 && ! HONOR_NANS (mode
)
3427 && ! HONOR_SIGNED_ZEROS (mode
)
3428 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3429 && rtx_equal_p (XEXP (op0
, 1), op2
))
3430 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3431 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3434 /* Convert a == b ? a : b into "b". */
3435 if (GET_CODE (op0
) == EQ
3436 && ! side_effects_p (op0
)
3437 && ! HONOR_NANS (mode
)
3438 && ! HONOR_SIGNED_ZEROS (mode
)
3439 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3440 && rtx_equal_p (XEXP (op0
, 1), op2
))
3441 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3442 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3445 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
3447 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
3448 ? GET_MODE (XEXP (op0
, 1))
3449 : GET_MODE (XEXP (op0
, 0)));
3452 /* Look for happy constants in op1 and op2. */
3453 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
3455 HOST_WIDE_INT t
= INTVAL (op1
);
3456 HOST_WIDE_INT f
= INTVAL (op2
);
3458 if (t
== STORE_FLAG_VALUE
&& f
== 0)
3459 code
= GET_CODE (op0
);
3460 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
3463 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
3471 return simplify_gen_relational (code
, mode
, cmp_mode
,
3472 XEXP (op0
, 0), XEXP (op0
, 1));
3475 if (cmp_mode
== VOIDmode
)
3476 cmp_mode
= op0_mode
;
3477 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
3478 cmp_mode
, XEXP (op0
, 0),
3481 /* See if any simplifications were possible. */
3484 if (GET_CODE (temp
) == CONST_INT
)
3485 return temp
== const0_rtx
? op2
: op1
;
3487 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
3493 gcc_assert (GET_MODE (op0
) == mode
);
3494 gcc_assert (GET_MODE (op1
) == mode
);
3495 gcc_assert (VECTOR_MODE_P (mode
));
3496 op2
= avoid_constant_pool_reference (op2
);
3497 if (GET_CODE (op2
) == CONST_INT
)
3499 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3500 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3501 int mask
= (1 << n_elts
) - 1;
3503 if (!(INTVAL (op2
) & mask
))
3505 if ((INTVAL (op2
) & mask
) == mask
)
3508 op0
= avoid_constant_pool_reference (op0
);
3509 op1
= avoid_constant_pool_reference (op1
);
3510 if (GET_CODE (op0
) == CONST_VECTOR
3511 && GET_CODE (op1
) == CONST_VECTOR
)
3513 rtvec v
= rtvec_alloc (n_elts
);
3516 for (i
= 0; i
< n_elts
; i
++)
3517 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
3518 ? CONST_VECTOR_ELT (op0
, i
)
3519 : CONST_VECTOR_ELT (op1
, i
));
3520 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
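/* Hedged illustration (not from the original sources): taking
   (subreg:QI (const_int 0x1234) 0) of an HImode value on a little-endian
   target unpacks the constant into the byte array { 0x34, 0x12 }, selects
   byte 0, and repacks it as (const_int 0x34).  */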
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
3562 /* Some ports misuse CCmode. */
3563 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
3566 /* We have no way to represent a complex constant at the rtl level. */
3567 if (COMPLEX_MODE_P (outermode
))
3570 /* Unpack the value. */
3572 if (GET_CODE (op
) == CONST_VECTOR
)
3574 num_elem
= CONST_VECTOR_NUNITS (op
);
3575 elems
= &CONST_VECTOR_ELT (op
, 0);
3576 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
3582 elem_bitsize
= max_bitsize
;
3584 /* If this asserts, it is too complicated; reducing value_bit may help. */
3585 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
3586 /* I don't know how to handle endianness of sub-units. */
3587 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
3589 for (elem
= 0; elem
< num_elem
; elem
++)
3592 rtx el
= elems
[elem
];
3594 /* Vectors are kept in target memory order. (This is probably
3597 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3598 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3600 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3601 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3602 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3603 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3604 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3607 switch (GET_CODE (el
))
3611 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3613 *vp
++ = INTVAL (el
) >> i
;
3614 /* CONST_INTs are always logically sign-extended. */
3615 for (; i
< elem_bitsize
; i
+= value_bit
)
3616 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
3620 if (GET_MODE (el
) == VOIDmode
)
3622 /* If this triggers, someone should have generated a
3623 CONST_INT instead. */
3624 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
3626 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
3627 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
3628 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
3631 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
3634 /* It shouldn't matter what's done here, so fill it with
3636 for (; i
< elem_bitsize
; i
+= value_bit
)
3641 long tmp
[max_bitsize
/ 32];
3642 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
3644 gcc_assert (GET_MODE_CLASS (GET_MODE (el
)) == MODE_FLOAT
);
3645 gcc_assert (bitsize
<= elem_bitsize
);
3646 gcc_assert (bitsize
% value_bit
== 0);
3648 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
3651 /* real_to_target produces its result in words affected by
3652 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3653 and use WORDS_BIG_ENDIAN instead; see the documentation
3654 of SUBREG in rtl.texi. */
3655 for (i
= 0; i
< bitsize
; i
+= value_bit
)
3658 if (WORDS_BIG_ENDIAN
)
3659 ibase
= bitsize
- 1 - i
;
3662 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
3665 /* It shouldn't matter what's done here, so fill it with
3667 for (; i
< elem_bitsize
; i
+= value_bit
)
3677 /* Now, pick the right byte to start with. */
3678 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3679 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3680 will already have offset 0. */
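  /* Hedged example (an assumption, not original text): on a big-endian
     target, the lowpart QImode subreg of an SImode value has BYTE == 3;
     the adjustment below renumbers it to 0, so that the least-significant
     byte is always byte 0 internally.  */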
3681 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
3683 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
3685 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3686 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3687 byte
= (subword_byte
% UNITS_PER_WORD
3688 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3691 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3692 so if it's become negative it will instead be very large.) */
3693 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
3695 /* Convert from bytes to chunks of size value_bit. */
3696 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
3698 /* Re-pack the value. */
3700 if (VECTOR_MODE_P (outermode
))
3702 num_elem
= GET_MODE_NUNITS (outermode
);
3703 result_v
= rtvec_alloc (num_elem
);
3704 elems
= &RTVEC_ELT (result_v
, 0);
3705 outer_submode
= GET_MODE_INNER (outermode
);
3711 outer_submode
= outermode
;
3714 outer_class
= GET_MODE_CLASS (outer_submode
);
3715 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
3717 gcc_assert (elem_bitsize
% value_bit
== 0);
3718 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
3720 for (elem
= 0; elem
< num_elem
; elem
++)
3724 /* Vectors are stored in target memory order. (This is probably
3727 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3728 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3730 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3731 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3732 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3733 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3734 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3737 switch (outer_class
)
3740 case MODE_PARTIAL_INT
:
3742 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
3745 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3747 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
3748 for (; i
< elem_bitsize
; i
+= value_bit
)
3749 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
3750 << (i
- HOST_BITS_PER_WIDE_INT
));
3752 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3754 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
3755 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
3756 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
3757 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
3766 long tmp
[max_bitsize
/ 32];
3768 /* real_from_target wants its input in words affected by
3769 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3770 and use WORDS_BIG_ENDIAN instead; see the documentation
3771 of SUBREG in rtl.texi. */
3772 for (i
= 0; i
< max_bitsize
/ 32; i
++)
3774 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
3777 if (WORDS_BIG_ENDIAN
)
3778 ibase
= elem_bitsize
- 1 - i
;
3781 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
3784 real_from_target (&r
, tmp
, outer_submode
);
3785 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
3793 if (VECTOR_MODE_P (outermode
))
3794 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
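/* Hedged usage sketch (an assumption, not original text): a typical call is
       simplify_subreg (outermode, op, innermode,
			subreg_lowpart_offset (outermode, innermode));
   callers that must always get an rtx back use simplify_gen_subreg below,
   which falls back to building the SUBREG itself when this returns 0.  */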
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
3825 /* Changing mode twice with SUBREG => just change it once,
3826 or not at all if changing back op starting mode. */
3827 if (GET_CODE (op
) == SUBREG
)
3829 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3830 int final_offset
= byte
+ SUBREG_BYTE (op
);
3833 if (outermode
== innermostmode
3834 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3835 return SUBREG_REG (op
);
3837 /* The SUBREG_BYTE represents offset, as if the value were stored
3838 in memory. Irritating exception is paradoxical subreg, where
3839 we define SUBREG_BYTE to be 0. On big endian machines, this
3840 value should be negative. For a moment, undo this exception. */
3841 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3843 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3844 if (WORDS_BIG_ENDIAN
)
3845 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3846 if (BYTES_BIG_ENDIAN
)
3847 final_offset
+= difference
% UNITS_PER_WORD
;
3849 if (SUBREG_BYTE (op
) == 0
3850 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3852 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3853 if (WORDS_BIG_ENDIAN
)
3854 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3855 if (BYTES_BIG_ENDIAN
)
3856 final_offset
+= difference
% UNITS_PER_WORD
;
3859 /* See whether resulting subreg will be paradoxical. */
3860 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3862 /* In nonparadoxical subregs we can't handle negative offsets. */
3863 if (final_offset
< 0)
3865 /* Bail out in case resulting subreg would be incorrect. */
3866 if (final_offset
% GET_MODE_SIZE (outermode
)
3867 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3873 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
3875 /* In paradoxical subreg, see if we are still looking on lower part.
3876 If so, our SUBREG_BYTE will be 0. */
3877 if (WORDS_BIG_ENDIAN
)
3878 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3879 if (BYTES_BIG_ENDIAN
)
3880 offset
+= difference
% UNITS_PER_WORD
;
3881 if (offset
== final_offset
)
3887 /* Recurse for further possible simplifications. */
3888 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
3892 if (validate_subreg (outermode
, innermostmode
,
3893 SUBREG_REG (op
), final_offset
))
3894 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3898 /* SUBREG of a hard register => just change the register number
3899 and/or mode. If the hard register is not valid in that mode,
3900 suppress this simplification. If the hard register is the stack,
3901 frame, or argument pointer, leave this as a SUBREG. */
3904 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3905 #ifdef CANNOT_CHANGE_MODE_CLASS
3906 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3907 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3908 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3910 && ((reload_completed
&& !frame_pointer_needed
)
3911 || (REGNO (op
) != FRAME_POINTER_REGNUM
3912 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3913 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3916 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3917 && REGNO (op
) != ARG_POINTER_REGNUM
3919 && REGNO (op
) != STACK_POINTER_REGNUM
3920 && subreg_offset_representable_p (REGNO (op
), innermode
,
3923 unsigned int regno
= REGNO (op
);
3924 unsigned int final_regno
3925 = regno
+ subreg_regno_offset (regno
, innermode
, byte
, outermode
);
3927 /* ??? We do allow it if the current REG is not valid for
3928 its mode. This is a kludge to work around how float/complex
3929 arguments are passed on 32-bit SPARC and should be fixed. */
3930 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3931 || ! HARD_REGNO_MODE_OK (regno
, innermode
))
3933 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
3935 /* Propagate original regno. We don't have any way to specify
3936 the offset inside original regno, so do so only for lowpart.
3937 The information is used only by alias analysis that can not
3938 grog partial register anyway. */
3940 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3941 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3946 /* If we have a SUBREG of a register that we are replacing and we are
3947 replacing it with a MEM, make a new MEM and try replacing the
3948 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3949 or if we would be widening it. */
3952 && ! mode_dependent_address_p (XEXP (op
, 0))
3953 /* Allow splitting of volatile memory references in case we don't
3954 have instruction to move the whole thing. */
3955 && (! MEM_VOLATILE_P (op
)
3956 || ! have_insn_for (SET
, innermode
))
3957 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3958 return adjust_address_nv (op
, outermode
, byte
);
3960 /* Handle complex values represented as CONCAT
3961 of real and imaginary part. */
3962 if (GET_CODE (op
) == CONCAT
)
3964 unsigned int inner_size
, final_offset
;
3967 inner_size
= GET_MODE_UNIT_SIZE (innermode
);
3968 part
= byte
< inner_size
? XEXP (op
, 0) : XEXP (op
, 1);
3969 final_offset
= byte
% inner_size
;
3970 if (final_offset
+ GET_MODE_SIZE (outermode
) > inner_size
)
3973 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3976 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
3977 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3981 /* Optimize SUBREG truncations of zero and sign extended values. */
3982 if ((GET_CODE (op
) == ZERO_EXTEND
3983 || GET_CODE (op
) == SIGN_EXTEND
)
3984 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
3986 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
3988 /* If we're requesting the lowpart of a zero or sign extension,
3989 there are three possibilities. If the outermode is the same
3990 as the origmode, we can omit both the extension and the subreg.
3991 If the outermode is not larger than the origmode, we can apply
3992 the truncation without the extension. Finally, if the outermode
3993 is larger than the origmode, but both are integer modes, we
3994 can just extend to the appropriate mode. */
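  /* Hedged illustration (an assumption, not original text): for
     (subreg:HI (zero_extend:SI (reg:HI x)) 0) the outer and original modes
     match, so the result is just (reg:HI x); for
     (subreg:QI (zero_extend:SI (reg:HI x)) 0) the extension is dropped and
     the subreg is retargeted at (reg:HI x) instead.  */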
3997 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
3998 if (outermode
== origmode
)
3999 return XEXP (op
, 0);
4000 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
4001 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
4002 subreg_lowpart_offset (outermode
,
4004 if (SCALAR_INT_MODE_P (outermode
))
4005 return simplify_gen_unary (GET_CODE (op
), outermode
,
4006 XEXP (op
, 0), origmode
);
4009 /* A SUBREG resulting from a zero extension may fold to zero if
4010 it extracts higher bits that the ZERO_EXTEND's source bits. */
4011 if (GET_CODE (op
) == ZERO_EXTEND
4012 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
4013 return CONST0_RTX (outermode
);
4016 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4017 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4018 the outer subreg is effectively a truncation to the original mode. */
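  /* Hedged example (not from the original sources): with QImode x,
     (subreg:QI (lshiftrt:SI (sign_extend:SI x) (const_int 2)) 0)
     becomes (ashiftrt:QI x (const_int 2)) under the conditions below.  */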
4019 if ((GET_CODE (op
) == LSHIFTRT
4020 || GET_CODE (op
) == ASHIFTRT
)
4021 && SCALAR_INT_MODE_P (outermode
)
4022 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4023 to avoid the possibility that an outer LSHIFTRT shifts by more
4024 than the sign extension's sign_bit_copies and introduces zeros
4025 into the high bits of the result. */
4026 && (2 * GET_MODE_BITSIZE (outermode
)) <= GET_MODE_BITSIZE (innermode
)
4027 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4028 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
4029 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4030 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4031 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4032 return simplify_gen_binary (ASHIFTRT
, outermode
,
4033 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
4035 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4036 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4037 the outer subreg is effectively a truncation to the original mode. */
4038 if ((GET_CODE (op
) == LSHIFTRT
4039 || GET_CODE (op
) == ASHIFTRT
)
4040 && SCALAR_INT_MODE_P (outermode
)
4041 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
4042 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4043 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
4044 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4045 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4046 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4047 return simplify_gen_binary (LSHIFTRT
, outermode
,
4048 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
4050 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4051 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4052 the outer subreg is effectively a truncation to the original mode. */
4053 if (GET_CODE (op
) == ASHIFT
4054 && SCALAR_INT_MODE_P (outermode
)
4055 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
4056 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4057 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
4058 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
4059 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4060 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4061 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4062 return simplify_gen_binary (ASHIFT
, outermode
,
4063 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))