/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode,
                                                const_rtx, enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode,
                                          const_rtx,
                                          enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
                         ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      if (CONST_INT_P (XEXP (x, 1)))
        return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
               && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}

/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}

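/* For illustration (an added example, not in the original sources):
   given (const (plus (symbol_ref "x") (const_int 12))),
   get_integer_term returns 12 and get_related_value returns the
   (symbol_ref "x"); for (const (minus (symbol_ref "x") (const_int 4)))
   they return -4 and the symbol_ref respectively.  */
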
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}

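/* Usage sketch (an added example; ADDR is a hypothetical address rtx):

     rtx base, offset;
     split_const (addr, &base, &offset);

   OFFSET is always a CONST_INT afterwards: either the apparent integer
   term of ADDR, or const0_rtx, in which case BASE is ADDR itself;
   split_const never fails.  */
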
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    case CONST_INT:
    case CONST_VECTOR:
    case CONST_DOUBLE:
    case CONST_FIXED:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}

/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}

/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}

/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}

/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

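/* Usage sketch (an added example; INSN is a hypothetical insn):

     const_rtx killer = set_of (reg, insn);
     if (killer != NULL_RTX)
       ... KILLER is the SET or CLOBBER rtx within INSN's pattern
           that modifies REG ...  */
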
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    return NULL_RTX;
                  set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}

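/* single_set_2 is normally reached through the single_set wrapper
   declared in rtl.h, which handles the common case of a plain SET
   pattern directly and falls back to this function for PARALLELs.
   A typical use (an added example):

     rtx set = single_set (insn);
     if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
       ... INSN is a candidate no-op move ...  */
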
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}

/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}

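/* For example (added): (set (reg:SI 60) (reg:SI 60)) is a no-op by the
   tests above, while (set (reg:SI 60) (reg:SI 61)) is not, and neither
   is a pair of SUBREGs whose SUBREG_BYTE fields differ.  */
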
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}

/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}

/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}

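/* Usage sketch (an added example): counting register stores in a
   pattern with a hypothetical callback.

     static void
     count_reg_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         (*(int *) data)++;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_reg_stores, &n);  */
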
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}

/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == COND_EXEC)
    pattern = COND_EXEC_CODE (pattern);

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

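/* Usage sketch (an added example): fetch the value recorded by a
   REG_EQUAL note, if any.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note && CONSTANT_P (XEXP (note, 0)))
       ... XEXP (note, 0) is a constant that INSN's destination equals ...  */
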
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}

/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}

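/* Usage sketch (an added example): record that INSN's destination is
   known to equal the constant 42.

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   The new note is pushed onto the front of INSN's REG_NOTES list.  */
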
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}

/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state instructions, and thus no
   instructions should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Nonzero if X contains any volatile memory references
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      return targetm.unspec_may_trap_p (x, flags);

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE (x) ? INTVAL (MEM_SIZE (x)) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}

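/* Usage sketch (an added example): a pass hoisting an expression out
   of a conditional must first verify that it cannot trap.

     if (!may_trap_p (SET_SRC (set)))
       ... SET_SRC may be evaluated unconditionally ...  */
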
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}

/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}

/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  /* The following prevents loops from occurring when we change MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}

/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

int
replace_label (rtx *x, void *data)
{
  rtx l = *x;
  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
    {
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
        {
          rtx new_c, new_l;
          replace_label_data *d = (replace_label_data *) data;

          /* Create a copy of constant C; replace the label inside
             but do not update LABEL_NUSES because uses in constant pool
             are not counted.  */
          new_c = copy_rtx (c);
          d->update_label_nuses = false;
          for_each_rtx (&new_c, replace_label, data);
          d->update_label_nuses = update_label_nuses;

          /* Add the new constant NEW_C to constant pool and replace
             the old reference to constant by new reference.  */
          new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
          *x = replace_rtx (l, l, new_l);
        }
      return 0;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
    {
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
        {
          ++LABEL_NUSES (new_label);
          --LABEL_NUSES (old_label);
        }
      return 0;
    }

  return 0;
}

/* When *BODY is equal to X or X is directly referenced by *BODY
   return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
   too, otherwise FOR_EACH_RTX continues traversing *BODY.  */

static int
rtx_referenced_p_1 (rtx *body, void *x)
{
  rtx y = (rtx) x;

  if (*body == NULL_RTX)
    return y == NULL_RTX;

  /* Return true if a label_ref *BODY refers to label Y.  */
  if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
    return XEXP (*body, 0) == y;

  /* If *BODY is a reference to pool constant traverse the constant.  */
  if (GET_CODE (*body) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (*body))
    return rtx_referenced_p (y, get_pool_constant (*body));

  /* By default, compare the RTL expressions.  */
  return rtx_equal_p (*body, y);
}

/* Return true if X is referenced in BODY.  */

int
rtx_referenced_p (rtx x, rtx body)
{
  return for_each_rtx (&body, rtx_referenced_p_1, x);
}

/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
{
  rtx label, table;

  if (JUMP_P (insn)
      && (label = JUMP_LABEL (insn)) != NULL_RTX
      && (table = next_active_insn (label)) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = table;
      return true;
    }
  return false;
}

/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;

  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              has_use_labelref = 1;

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}

/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */
static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
        {
        case 'e':
          /* Call F on X.  */
          x = &XEXP (exp, n);
          result = (*f) (x, data);
          if (result == -1)
            /* Do not traverse sub-expressions.  */
            continue;
          else if (result != 0)
            /* Stop the traversal.  */
            return result;

          if (*x == NULL_RTX)
            /* There are no sub-expressions.  */
            continue;

          i = non_rtx_starting_operands[GET_CODE (*x)];
          if (i >= 0)
            {
              result = for_each_rtx_1 (*x, i, f, data);
              if (result != 0)
                return result;
            }
          break;

        case 'V':
        case 'E':
          if (XVEC (exp, n) == 0)
            continue;
          for (j = 0; j < XVECLEN (exp, n); ++j)
            {
              /* Call F on X.  */
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
              if (result == -1)
                /* Do not traverse sub-expressions.  */
                continue;
              else if (result != 0)
                /* Stop the traversal.  */
                return result;

              if (*x == NULL_RTX)
                /* There are no sub-expressions.  */
                continue;

              i = non_rtx_starting_operands[GET_CODE (*x)];
              if (i >= 0)
                {
                  result = for_each_rtx_1 (*x, i, f, data);
                  if (result != 0)
                    return result;
                }
            }
          break;

        default:
          /* Nothing to do.  */
          break;
        }
    }

  return 0;
}

2843 /* Nothing to do. */
/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}
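
/* Usage sketch (illustrative, not part of the original source): a
   hypothetical callback that counts REG subexpressions could be

     static int
     count_regs_1 (rtx *x, void *data)
     {
       if (*x && REG_P (*x))
	 (*(int *) data)++;
       return 0;
     }

     int nregs = 0;
     for_each_rtx (&pattern, count_regs_1, &nregs);

   Returning 0 continues the walk, -1 skips the sub-expressions of *X,
   and any other value aborts the traversal and is returned from
   for_each_rtx.  */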
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

int
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
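
/* Worked example (illustrative): a REG has a higher precedence than a
   CONST_INT, so swap_commutative_operands_p ((const_int 4), (reg X))
   returns true, and canonicalization orders the PLUS as
   (plus (reg X) (const_int 4)) with the constant second.  */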
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
	      enum machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
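
/* Worked example (illustrative): with 4-byte words, consider
   (subreg:SI (reg:DI) 4).  On a little-endian target, word = 4 / 4 = 1
   and byte = 4 % 4 = 0, so the subreg starts at bit 32.  On a fully
   big-endian target, word = (8 - (4 + 4)) / 4 = 0 and byte = 0, so the
   same SUBREG_BYTE addresses the part starting at bit 0.  */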
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */
void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
		 unsigned int offset, enum machine_mode ymode,
		 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
	xmode_unit = xmode;
      else
	xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (GET_MODE_NUNITS (xmode)
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == (hard_regno_nregs[xregno][xmode_unit]
		      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
	   < GET_MODE_NUNITS (xmode))
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + GET_MODE_SIZE (ymode) - 1)
		  / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
	  ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	info->offset = nregs_xmode - nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
				       mode_for_size (GET_MODE_BITSIZE (xmode)
						      / nregs_xmode,
						      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
	= (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
		     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
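
/* Worked example (illustrative): on a hypothetical 32-bit little-endian
   target where DImode occupies two consecutive hard registers,
   subreg_regno_offset (R, DImode, 4, SImode) is 1, because the high
   SImode part lives in the second register; (subreg:SI (reg:DI R) 4)
   therefore resolves to hard register R + 1.  */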
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
			       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
		       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how float/complex arguments are passed
     on 32-bit SPARC and should be fixed.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case the code has not been optimized yet), or guard
	 against possible labels by setting BOUNDARY to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &tmp))
    {
      rtvec vec = XVEC (PATTERN (tmp),
			GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   The SPEED parameter specifies whether costs optimized for speed or
   size should be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    default:
      total = COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2
			      + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), code, speed);

  return total;
}
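
/* Usage sketch (illustrative, not from the original file): combine-like
   code compares candidate expressions by cost, e.g.

     if (rtx_cost (new_rtx, SET, optimize_insn_for_speed_p ())
	 < rtx_cost (old_rtx, SET, optimize_insn_for_speed_p ()))
       ... prefer NEW_RTX ...

   The OUTER_CODE argument lets the target hook judge an operand in the
   context of its parent operation.  */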
/* Fill in the structure C with information about both speed and size rtx
   costs for X, with outer code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, true);
  c->size = rtx_cost (x, outer, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or
   size should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, bool speed)
{
  return rtx_cost (x, MEM, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
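
/* Worked examples (illustrative): for x = (and:SI (reg:SI) (const_int 255)),
   nonzero_bits (x, SImode) is 0xff, since AND intersects the nonzero
   masks of its operands.  For x = (sign_extend:SI (reg:QI)),
   num_sign_bit_copies (x, SImode) is at least 25, because the 8-bit
   value's sign bit is replicated into the 24 high-order bits and is
   itself a copy.  */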
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
		     enum machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
	       enum machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  unsigned int mode_width = GET_MODE_BITSIZE (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_BITSIZE (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x))
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_BITSIZE (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_BITSIZE (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show that all the bits in the outer mode but not in the
	 inner mode may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (inner_nz
	      & (((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = floor_log2 (nz0 & -nz0);
	int low1 = floor_log2 (nz1 & -nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If pointers extend unsigned and this is an addition or subtraction
	 to a pointer in Pmode, all the bits above ptr_mode are known to be
	 zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
	  && (code == PLUS || code == MINUS)
	  && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
	      <= HOST_BITS_PER_WIDE_INT))
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
	  /* If this is a typical RISC machine, we only have to worry
	     about the way loads are extended.  */
	  if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	       ? (((nonzero
		    & (((unsigned HOST_WIDE_INT) 1
			<< (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
		   != 0))
	       : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
	      || !MEM_P (SUBREG_REG (x)))
#endif
	    {
	      /* On many CISC machines, accessing an object in a wider mode
		 causes the high-order bits to become undefined.  So they are
		 not known to be zero.  */
	      if (GET_MODE_SIZE (GET_MODE (x))
		  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
		nonzero |= (GET_MODE_MASK (GET_MODE (x))
			    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
	    }
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
	{
	  enum machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_BITSIZE (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
		inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
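
/* Worked example (illustrative): for (lshiftrt:SI (reg:SI) (const_int 24))
   the shift case above computes inner = 0xffffffff >> 24 = 0xff, so
   nonzero_bits reports that only the low 8 bits can be set; a following
   (and:SI ... (const_int 255)) is then provably redundant.  */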
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
			    enum machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
		      enum machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_BITSIZE (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
					 known_x, known_mode, known_ret);
      return MAX (1,
		  num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      return 1;
#else
      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
	  && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
	  )
	return 1;
#endif
    }

  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && mode == Pmode && REG_POINTER (x))
	return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
							 known_mode, known_ret,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth
			- (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return MAX (1, (num0
			  - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
				   - bitwidth)));
	}

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if ((GET_MODE_SIZE (GET_MODE (x))
	   > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	  && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	  && MEM_P (SUBREG_REG (x)))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
#endif
#endif
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If pointers extend signed and this is an addition or subtraction
	 to a pointer in Pmode, all the bits above ptr_mode are known to be
	 sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && (code == PLUS || code == MINUS)
	  && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
	result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
			     - GET_MODE_BITSIZE (ptr_mode) + 1),
		      result);
#endif
      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_BITSIZE (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_BITSIZE (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
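
/* Worked example (illustrative): for (ashiftrt:SI (reg:SI) (const_int 16))
   the ASHIFTRT case adds the shift count to the operand's copies, so at
   least 17 high-order bits equal the sign bit; combine can then narrow
   a following sign_extract or comparison accordingly.  */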
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = rtx_cost (SET_SRC (set), SET, speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
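
/* Usage sketch (illustrative, not from the original file): if-conversion
   style code can bound the expense of speculating an instruction with

     unsigned int cost = insn_rtx_cost (PATTERN (insn), speed_p);
     if (cost == 0 || cost > max_cost)
       return FALSE;

   where speed_p and max_cost are caller-supplied; a zero return means
   the pattern had no single SET to price.  */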
4695 /* Given an insn INSN and condition COND, return the condition in a
4696 canonical form to simplify testing by callers. Specifically:
4698 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4699 (2) Both operands will be machine operands; (cc0) will have been replaced.
4700 (3) If an operand is a constant, it will be the second operand.
4701 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4702 for GE, GEU, and LEU.
4704 If the condition cannot be understood, or is an inequality floating-point
4705 comparison which needs to be reversed, 0 will be returned.
4707 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
4709 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4710 insn used in locating the condition was found. If a replacement test
4711 of the condition is desired, it should be placed in front of that
4712 insn and we will be sure that the inputs are still valid.
4714 If WANT_REG is nonzero, we wish the condition to be relative to that
4715 register, if possible. Therefore, do not canonicalize the condition
4716 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4717 to be a compare to a CC mode register.
4719 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
rtx
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
         && op1 == CONST0_RTX (GET_MODE (op0))
         && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || !NONJUMP_INSN_P (prev)
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (!REG_P (op0))
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
          || !NONJUMP_INSN_P (prev)
          || FIND_REG_INC_NOTE (prev, NULL_RTX)
          /* In cfglayout mode, there do not have to be labels at the
             beginning of a block, or jumps at the end, so the previous
             conditions would not stop us when we reach bb boundary.  */
          || BLOCK_FOR_INSN (prev) != bb)
        break;

      set = set_of (op0, prev);

      if (set
          && (GET_CODE (set) != SET
              || !rtx_equal_p (SET_DEST (set), op0)))
        break;

      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (set)
        {
          enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
          REAL_VALUE_TYPE fsfv;
#endif

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.

             However, we must allow VOIDmode comparisons to match either
             CCmode or non-CCmode comparison, because some ports have
             modeless comparisons inside branch patterns.

             ??? This mode check should perhaps look more like the mode check
             in simplify_comparison in combine.  */

          if ((GET_CODE (SET_SRC (set)) == COMPARE
               || (((code == NE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && (STORE_FLAG_VALUE
                             & ((unsigned HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == LT
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set))))
              && (((GET_MODE_CLASS (mode) == MODE_CC)
                   == (GET_MODE_CLASS (inner_mode) == MODE_CC))
                  || mode == VOIDmode || inner_mode == VOIDmode))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (STORE_FLAG_VALUE
                             & ((unsigned HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set))
                   && (((GET_MODE_CLASS (mode) == MODE_CC)
                        == (GET_MODE_CLASS (inner_mode) == MODE_CC))
                       || mode == VOIDmode || inner_mode == VOIDmode))
            {
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else
            break;
        }

      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          /* If the caller is expecting the condition to be valid at INSN,
             make sure X doesn't change before INSN.  */
          if (valid_at_insn_p)
            if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
              break;
          if (COMPARISON_P (x))
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reversed_comparison_code (x, prev);
              if (code == UNKNOWN)
                return 0;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != ((unsigned HOST_WIDE_INT) 1
                  << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
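
/* For example (illustrative only; pseudo register 60 is arbitrary):
   given COND = (le (reg:SI 60) (const_int 3)), the result is
   (lt (reg:SI 60) (const_int 4)) by rule (4) above, and given
   COND = (gt (const_int 0) (reg:SI 60)) the operands are swapped to
   yield (lt (reg:SI 60) (const_int 0)) by rule (3).  */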
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare to a CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */
rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}
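
/* A minimal usage sketch (this helper is hypothetical and only
   illustrates the calling convention): fetch the canonicalized branch
   condition of JUMP, valid at JUMP itself, rejecting conditions that
   are bare CC-mode register compares.  */

static rtx ATTRIBUTE_UNUSED
example_branch_condition (rtx jump)
{
  return get_condition (jump, /*earliest=*/NULL,
                        /*allow_cc_mode=*/0, /*valid_at_insn_p=*/1);
}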
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */
static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        enum machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            enum machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep [in_mode][mode])
              num_sign_bit_copies_in_rep [in_mode][mode]
                += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
          }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */
bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
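
/* Continuing the hypothetical example above: with
   num_sign_bit_copies_in_rep[SImode][QImode] == 24, an SImode value
   counts as already truncated to QImode whenever at least 25 of its
   high-order bits are copies of the sign bit.  */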
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return GET_CODE (x) == CONST_DOUBLE;
}
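
/* For instance, a MEM whose address refers to the constant pool is
   resolved by avoid_constant_pool_reference to the pool entry itself,
   so the predicate holds exactly when that entry is a CONST_DOUBLE.  */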
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */
int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
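
/* Examples: in SImode, m == 0xff gives exact_log2 (0x100) == 8, the
   width of the low-order field; m == 0xf0 gives -1 because 0xf1 is not
   a power of two (the field does not start at bit 0).  */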