/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software
   Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Information about a subreg of a hard register.  */
struct subreg_info
{
  /* Offset of first hard register involved in the subreg.  */
  int offset;
  /* Number of hard registers involved in the subreg.  */
  int nregs;
  /* Whether this subreg can be represented as a hard reg with the new
     mode.  */
  bool representable_p;
};
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);
static void subreg_get_info (unsigned int, enum machine_mode,
			     unsigned int, enum machine_mode,
			     struct subreg_info *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
						   const_rtx, enum machine_mode,
						   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
					     const_rtx, enum machine_mode,
					     unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
						enum machine_mode, unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
					  enum machine_mode, unsigned int);
/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Bit flags that specify the machine subtype we are compiling for.
   Bits are tested using macros TARGET_... defined in the tm.h file
   and set by `-m...' switches.  Must be defined in rtlanal.c.  */

int target_flags;
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
      /* ??? When call-clobbered, the value is stable modulo the restore
	 that must happen after a call.  This currently screws up local-alloc
	 into believing that the restore is not needed.  */
      if (x == pic_offset_table_rtx)
	return 0;
#endif
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_unstable_p (XEXP (x, i)))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_unstable_p (XVECEXP (x, i, j)))
	    return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      if (x == pic_offset_table_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	  /* ??? When call-clobbered, the value is stable modulo the restore
	     that must happen after a call.  This currently screws up
	     local-alloc into believing that the restore is not needed, so we
	     must return 0 only if we are called from alias analysis.  */
	  && for_alias
#endif
	  )
	return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
	 (in fact it is related specifically to operand 1)
	 during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
	     || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }

  return 0;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
		       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
	 the real alignment of %sp.  However, when it does this, the
	 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
	actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
	return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
	return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
	{
	  tree decl;
	  HOST_WIDE_INT decl_size;

	  if (offset < 0)
	    return 1;
	  if (size == 0)
	    size = GET_MODE_SIZE (mode);
	  if (size == 0)
	    return offset != 0;

	  /* If the size of the access or of the symbol is unknown,
	     assume the worst.  */
	  decl = SYMBOL_REF_DECL (x);

	  /* Else check that the access is in bounds.  TODO: restructure
	     expr_size/lhd_expr_size/int_expr_size and just use the latter.  */
	  if (!decl)
	    decl_size = -1;
	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
	    decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
			 ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
			 : -1);
	  else if (TREE_CODE (decl) == STRING_CST)
	    decl_size = TREE_STRING_LENGTH (decl);
	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
	  else
	    decl_size = -1;

	  return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
	}

      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
	 - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
	return 0;

      /* - or it is an address that can't trap plus a constant integer,
	   with the proper remainder modulo the mode size if we are
	   considering unaligned memory references.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
				     size, mode, unaligned_mems))
	return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
				    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
	       && CONSTANT_P (XEXP (x, 1)))
	return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
	 auto-inc is only allowed in memories, the register must be a
	 pointer.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) > 0)
	return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }
  return 0;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
	return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
	return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  *base_out = XEXP (x, 0);
	  *offset_out = XEXP (x, 1);
	  return;
	}
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
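
/* Illustrative sketch, not part of the original file: one way a caller
   might use split_const to decompose an address into base plus offset.
   The helper name `example_base_and_offset' is hypothetical.  */

static HOST_WIDE_INT
example_base_and_offset (rtx addr, rtx *base)
{
  rtx offset;

  /* split_const never fails; it stores ADDR itself and const0_rtx
     when no apparent integer term exists, so OFFSET is always a
     CONST_INT here.  */
  split_const (addr, base, &offset);
  return INTVAL (offset);
}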
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
	count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
	return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
	return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
	{
	case 'e':
	  count += count_occurrences (XEXP (x, i), find, count_dest);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
	  break;
	}
    }
  return count;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
	 and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    case CONST_INT:
    case CONST_VECTOR:
    case CONST_DOUBLE:
    case CONST_FIXED:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && reg_mentioned_p (reg, XEXP (in, i)))
	return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;

  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
	    || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
	return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn references X if
	 it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
	  && GET_CODE (SET_DEST (body)) != PC
	  && !REG_P (SET_DEST (body))
	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (body)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
	return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
	  return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
	  return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
	return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
	  || (CALL_P (insn)
	      && ((REG_P (reg)
		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
					       GET_MODE (reg), REGNO (reg)))
		  || MEM_P (reg)
		  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
	if (memory_modified_in_insn_p (x, insn))
	  return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_between_p (XVECEXP (x, i, j), start, end))
	    return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      if (memory_modified_in_insn_p (x, insn))
	return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_in_p (XVECEXP (x, i, j), insn))
	    return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
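
/* Illustrative sketch, not part of the original file: how a pass might
   use set_of to ask which expression of an insn overwrites a register.
   `example_insn_clobbers_reg' is a hypothetical helper.  */

static bool
example_insn_clobbers_reg (const_rtx insn, const_rtx reg)
{
  /* set_of returns the SET or CLOBBER rtx whose destination overlaps
     REG, or NULL_RTX when the insn leaves REG alone.  */
  const_rtx setter = set_of (reg, insn);
  return setter != NULL_RTX && GET_CODE (setter) == CLOBBER;
}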
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx sub = XVECEXP (pat, 0, i);
	  switch (GET_CODE (sub))
	    {
	    case USE:
	    case CLOBBER:
	      break;

	    case SET:
	      /* We can consider insns having multiple sets, where all
		 but one are dead as single set insns.  In common case
		 only single set is present in the pattern so we want
		 to avoid checking for REG_UNUSED notes unless necessary.

		 When we reach set first time, we just expect this is
		 the single set we are looking for and only when more
		 sets are found in the insn, we check them.  */
	      if (!set_verified)
		{
		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
		      && !side_effects_p (set))
		    set = NULL;
		  else
		    set_verified = 1;
		}
	      if (!set)
		set = sub, set_verified = 0;
	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
		       || side_effects_p (sub))
		return NULL_RTX;
	      break;

	    default:
	      return NULL_RTX;
	    }
	}
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  {
	    /* If we have already found a SET, then return now.  */
	    if (found)
	      return 1;
	    else
	      found = 1;
	  }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
	   && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
	   && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
	return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
	  && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
	 this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx tem = XVECEXP (pat, 0, i);

	  if (GET_CODE (tem) == USE
	      || GET_CODE (tem) == CLOBBER)
	    continue;

	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
	    return 0;
	}

      return 1;
    }
  return 0;
}
/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
	rtx set = single_set (p);
	rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

	if (set && rtx_equal_p (x, SET_DEST (set)))
	  {
	    rtx src = SET_SRC (set);

	    if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
	      src = XEXP (note, 0);

	    if ((valid_to == NULL_RTX
		 || ! modified_between_p (src, PREV_INSN (p), valid_to))
		/* Reject hard registers because we don't usually want
		   to use them; we'd rather use a pseudo.  */
		&& (! (REG_P (src)
		       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
	      {
		*pinsn = p;
		return src;
	      }
	  }

	/* If set in non-simple way, we don't have a value.  */
	if (reg_set_p (x, p))
	  break;
      }

  return x;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
		   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
	 clobber a virtual register.  In fact, we could be more precise,
	 but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	   || x_regno == ARG_POINTER_REGNUM
#endif
	   || x_regno == FRAME_POINTER_REGNUM)
	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
	return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
	 registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int inner_regno = subreg_regno (x);
	  unsigned int inner_endregno
	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
			     ? subreg_nregs (x) : 1);

	  return endregno > inner_regno && regno < inner_endregno;
	}
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
	  /* Note setting a SUBREG counts as referring to the REG it is in for
	     a pseudo but not for hard registers since we can
	     treat each word individually.  */
	  && ((GET_CODE (SET_DEST (x)) == SUBREG
	       && loc != &SUBREG_REG (SET_DEST (x))
	       && REG_P (SUBREG_REG (SET_DEST (x)))
	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
	       && refers_to_regno_p (regno, endregno,
				     SUBREG_REG (SET_DEST (x)), loc))
	      || (!REG_P (SET_DEST (x))
		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
	return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
	return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
	{
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }
	  else
	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
	      return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (loc != &XVECEXP (x, i, j)
		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
	      return 1;
	}
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
	regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
			  ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
	const char *fmt;
	int i;

	if (MEM_P (in))
	  return 1;

	fmt = GET_RTX_FORMAT (GET_CODE (in));
	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    {
	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
		return 1;
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
		  return 1;
	    }

	return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
	int i;

	/* If any register in here refers to it we return true.  */
	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
	    return 1;
	return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
	      && (!REG_P (SUBREG_REG (dest))
		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
	 each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
	{
	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
	}
      else
	(*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
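
/* Illustrative sketch, not part of the original file: a minimal
   note_stores callback that records whether any hard register is
   written.  The names `example_hard_reg_store' and
   `example_writes_hard_reg' are hypothetical.  */

static void
example_hard_reg_store (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			void *data)
{
  /* DEST may be a REG, MEM, or a SUBREG of a hard register; look
     through a SUBREG to the register underneath.  */
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    *(bool *) data = true;
}

static bool
example_writes_hard_reg (const_rtx insn)
{
  bool found = false;
  note_stores (PATTERN (insn), example_hard_reg_store, &found);
  return found;
}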
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	(*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	(*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
	rtx dest = SET_DEST (body);

	/* For sets we replace everything in source plus registers in memory
	   expression in store and operands of a ZERO_EXTRACT.  */
	(*fun) (&SET_SRC (body), data);

	if (GET_CODE (dest) == ZERO_EXTRACT)
	  {
	    (*fun) (&XEXP (dest, 1), data);
	    (*fun) (&XEXP (dest, 2), data);
	  }

	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
	  dest = XEXP (dest, 0);

	if (MEM_P (dest))
	  (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
	    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
	       + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
	 values of functions, and those registers are wrapped in
	 PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	{
	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
	  if (inner != NULL_RTX
	      && covers_regno_no_parallel_p (inner, test_regno))
	    return true;
	}

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == COND_EXEC)
    pattern = COND_EXEC_CODE (pattern);

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
	{
	  rtx body = XVECEXP (pattern, 0, i);

	  if (GET_CODE (body) == COND_EXEC)
	    body = COND_EXEC_CODE (body);

	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
	      && covers_regno_p (SET_DEST (body), test_regno))
	    return 1;
	}
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == kind)
	  return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
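
/* Illustrative sketch, not part of the original file: querying the notes
   API above.  `example_known_constant' is a hypothetical helper that asks
   whether INSN is annotated with a constant REG_EQUAL value.  */

static rtx
example_known_constant (const_rtx insn)
{
  /* Passing NULL_RTX as DATUM matches any note of the given kind.  */
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);

  if (note != NULL_RTX && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);
  return NULL_RTX;
}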
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
	/* Verify that it is a register, so that scratch and MEM won't cause a
	   problem here.  */
	&& REG_P (XEXP (link, 0))
	&& REGNO (XEXP (link, 0)) <= regno
	&& END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
	|| REG_NOTE_KIND (link) == REG_EQUIV)
      {
	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
	   insns that have multiple sets.  Checking single_set to
	   make sure of this is not the proper check, as explained
	   in the comment in set_unique_reg_note.

	   This should be changed into an assert.  */
	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
	  return 0;
	return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
	return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	if (GET_CODE (XEXP (link, 0)) == code
	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
	  return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
	 to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int end_regno = END_HARD_REGNO (datum);
	  unsigned int i;

	  for (i = regno; i < end_regno; i++)
	    if (find_regno_fusage (insn, code, i))
	      return 1;
	}
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
	  && REG_P (reg = XEXP (op, 0))
	  && REGNO (reg) <= regno
	  && END_HARD_REGNO (reg) > regno)
	return 1;
    }

  return 0;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  rtx note;

  switch (kind)
    {
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
	 EXPR_LIST, so that copying is done right and dumps look
	 better.  */
      note = alloc_INSN_LIST (datum, REG_NOTES (insn));
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, REG_NOTES (insn));
      break;
    }

  REG_NOTES (insn) = note;
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
	{
	  XEXP (link, 1) = XEXP (note, 1);
	  break;
	}

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
	*loc = XEXP (*loc, 1);
      else
	loc = &XEXP (*loc, 1);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = XEXP (temp, 1);
	  else
	    *listp = XEXP (temp, 1);

	  return;
	}

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_insn_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_insn_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      return targetm.unspec_may_trap_p (x, flags);

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  HOST_WIDE_INT size = MEM_SIZE (x) ? INTVAL (MEM_SIZE (x)) : 0;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
	return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
	  || HONOR_NANS (GET_MODE (XEXP (x, 1))))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
	  || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
	  && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
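
/* Illustrative sketch, not part of the original file: a typical guard a
   pass might apply before moving an expression to a new context.
   `example_safe_to_move_p' is a hypothetical helper; real passes check
   more than trapping.  */

static bool
example_safe_to_move_p (const_rtx x)
{
  /* Moving X changes its context, so use the stricter entry point
     rather than plain may_trap_p, and reject side effects too.  */
  return !side_effects_p (x) && !may_trap_or_fault_p (x);
}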
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  /* The following prevents loops occurrence when we change MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (GET_CODE (new_rtx) == CONST_INT)
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (GET_CODE (new_rtx) == CONST_INT)
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

int
replace_label (rtx *x, void *data)
{
  rtx l = *x;
  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
    {
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
	{
	  rtx new_c, new_l;
	  replace_label_data *d = (replace_label_data *) data;

	  /* Create a copy of constant C; replace the label inside
	     but do not update LABEL_NUSES because uses in constant pool
	     are not counted.  */
	  new_c = copy_rtx (c);
	  d->update_label_nuses = false;
	  for_each_rtx (&new_c, replace_label, data);
	  d->update_label_nuses = update_label_nuses;

	  /* Add the new constant NEW_C to constant pool and replace
	     the old reference to constant by new reference.  */
	  new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
	  *x = replace_rtx (l, l, new_l);
	}
      return 0;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
    {
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
	{
	  ++LABEL_NUSES (new_label);
	  --LABEL_NUSES (old_label);
	}
      return 0;
    }

  return 0;
}
/* When *BODY is equal to X or X is directly referenced by *BODY
   return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
   too, otherwise FOR_EACH_RTX continues traversing *BODY.  */

static int
rtx_referenced_p_1 (rtx *body, void *x)
{
  rtx y = (rtx) x;

  if (*body == NULL_RTX)
    return y == NULL_RTX;

  /* Return true if a label_ref *BODY refers to label Y.  */
  if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
    return XEXP (*body, 0) == y;

  /* If *BODY is a reference to pool constant traverse the constant.  */
  if (GET_CODE (*body) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (*body))
    return rtx_referenced_p (y, get_pool_constant (*body));

  /* By default, compare the RTL expressions.  */
  return rtx_equal_p (*body, y);
}
/* Return true if X is referenced in BODY.  */

int
rtx_referenced_p (rtx x, rtx body)
{
  return for_each_rtx (&body, rtx_referenced_p_1, x);
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
{
  rtx label, table;

  if (JUMP_P (insn)
      && (label = JUMP_LABEL (insn)) != NULL_RTX
      && (table = next_active_insn (label)) != NULL_RTX
      && JUMP_P (table)
      && (GET_CODE (PATTERN (table)) == ADDR_VEC
	  || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
    {
      if (labelp)
	*labelp = label;
      if (tablep)
	*tablep = table;
      return true;
    }
  return false;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      has_use_labelref = 1;

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */
static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
	{
	case 'e':
	  /* Call F on X.  */
	  x = &XEXP (exp, n);
	  result = (*f) (x, data);
	  if (result == -1)
	    /* Do not traverse sub-expressions.  */
	    continue;
	  else if (result != 0)
	    /* Stop the traversal.  */
	    return result;

	  if (*x == NULL_RTX)
	    /* There are no sub-expressions.  */
	    continue;

	  i = non_rtx_starting_operands[GET_CODE (*x)];
	  if (i >= 0)
	    {
	      result = for_each_rtx_1 (*x, i, f, data);
	      if (result != 0)
		return result;
	    }
	  break;

	case 'V':
	case 'E':
	  if (XVEC (exp, n) == 0)
	    continue;
	  for (j = 0; j < XVECLEN (exp, n); ++j)
	    {
	      /* Call F on X.  */
	      x = &XVECEXP (exp, n, j);
	      result = (*f) (x, data);
	      if (result == -1)
		/* Do not traverse sub-expressions.  */
		continue;
	      else if (result != 0)
		/* Stop the traversal.  */
		return result;

	      if (*x == NULL_RTX)
		/* There are no sub-expressions.  */
		continue;

	      i = non_rtx_starting_operands[GET_CODE (*x)];
	      if (i >= 0)
		{
		  result = for_each_rtx_1 (*x, i, f, data);
		  if (result != 0)
		    return result;
		}
	    }
	  break;

	default:
	  /* Nothing to do.  */
	  break;
	}
    }

  return 0;
}
/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}

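/* A hypothetical usage sketch (names invented): counting the MEMs in an
   expression with for_each_rtx.  Returning -1 from the callback prunes
   the walk below a match; any other nonzero return would abort the
   traversal entirely, as described above.  */
#if 0
static int
example_count_mems_1 (rtx *x, void *data)
{
  if (MEM_P (*x))
    {
      ++*(int *) data;
      return -1;   /* Do not walk into the address.  */
    }
  return 0;        /* Keep walking.  */
}

static int
example_count_mems (rtx x)
{
  int count = 0;
  for_each_rtx (&x, example_count_mems_1, &count);
  return count;
}
#endif
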
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always come second.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non-pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}

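/* For instance, in (plus (const_int 4) (reg)) the CONST_INT has
   precedence -8 while the REG has -2 (-1 if it is a pointer), so
   swap_commutative_operands_p returns nonzero and canonicalization
   produces (plus (reg) (const_int 4)).  */
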
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}

/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}

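/* As a worked example: for (subreg:SI (reg:DI) 4) on a little-endian
   target with 4-byte words, subreg_lsb_1 computes word = 4 / 4 = 1 and
   byte = 4 % 4 = 0, so the subreg starts at bit 1 * 32 + 0 = 32, i.e.
   the high half of the DImode value.  */
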
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */

static void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
                 unsigned int offset, enum machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}

/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
                     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
                               unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}

/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
                       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && (xregno == FRAME_POINTER_REGNUM
          || xregno == HARD_FRAME_POINTER_REGNUM))
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how float/complex arguments are passed
     on 32-bit SPARC and should be fixed.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}

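/* As a worked example: on a little-endian 32-bit target where a DImode
   value occupies a pair of hard registers, (subreg:SI (reg:DI 10) 4)
   names the second register of the pair; subreg_get_info computes an
   offset of 1, so simplify_subreg_regno returns 11, provided register
   11 accepts SImode.  */
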
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}

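/* E.g. on a 32-bit target, (subreg:SI (reg:DI 10) 0) occupies one hard
   register, while the paradoxical (subreg:DI (reg:SI 10) 0) occupies
   two, matching hard_regno_nregs for the outer mode.  */
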
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for the first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller must either ensure that we will find all sets
         (in case the code has not been optimized yet), or take care
         of possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}

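/* A hypothetical usage sketch (variable names invented): callers treat
   the returned insn as a barrier before which the argument-register
   setup of CALL_INSN must not be disturbed.  */
#if 0
  rtx first = find_first_parameter_load (call_insn, BB_HEAD (bb));
  /* Nothing may be hoisted between FIRST and CALL_INSN; the parameter
     registers are live throughout that window.  */
#endif
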
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}

/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &tmp))
    {
      rtvec vec = XVEC (PATTERN (tmp),
                        GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   The SPEED parameter specifies whether costs optimized for speed or
   size should be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    default:
      total = COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2
                              + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, speed);

  return total;
}

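/* A hypothetical usage sketch: ranking two candidate expressions by
   cost, as cse and rtl expansion do.  In real callers SPEED typically
   comes from optimize_insn_for_speed_p ().  */
#if 0
  if (rtx_cost (new_rtx, SET, speed) < rtx_cost (old_rtx, SET, speed))
    /* NEW_RTX is the cheaper of the two; prefer it.  */
    replace = true;
#endif
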
/* Return the cost of address expression X.
   X is expected to be a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or
   size should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, bool speed)
{
  /* We may be asked for the cost of various unusual addresses, such as
     operands of a push instruction.  It is not worthwhile to complicate
     the writing of the target hook with such cases.  */

  if (!memory_address_p (mode, x))
    return 1000;

  return targetm.address_cost (x, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, bool speed)
{
  return rtx_cost (x, MEM, speed);
}

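/* A caller can therefore rank candidate addresses simply by comparing
   their address_cost values, e.g. keeping a propagated address only
   when it costs no more than the address it replaces.  */
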
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}

/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
                     enum machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

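/* As a worked example of the caching: in (plus (reg:SI 60) (reg:SI 60))
   both operands are the same rtx, so the first-level check fires and
   nonzero_bits1 is called once with the shared operand as KNOWN_X and
   its precomputed mask as KNOWN_RET, instead of recursing into the
   operand twice.  */
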
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
               enum machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  unsigned int mode_width = GET_MODE_BITSIZE (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_BITSIZE (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
          && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
        return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
#endif

      return INTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_BITSIZE (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_BITSIZE (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (inner_nz
              & (((HOST_WIDE_INT) 1
                  << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0 =
          cached_nonzero_bits (XEXP (x, 0), mode,
                               known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0 =
          cached_nonzero_bits (XEXP (x, 0), mode,
                               known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1 =
          cached_nonzero_bits (XEXP (x, 1), mode,
                               known_x, known_mode, known_ret);
        int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        HOST_WIDE_INT op0_maybe_minusp
          = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
        HOST_WIDE_INT op1_maybe_minusp
          = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (! op0_maybe_minusp && ! op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (! op0_maybe_minusp && ! op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);

#ifdef POINTERS_EXTEND_UNSIGNED
        /* If pointers extend unsigned and this is an addition or subtraction
           to a pointer in Pmode, all the bits above ptr_mode are known to be
           zero.  */
        if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
            && (code == PLUS || code == MINUS)
            && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
          nonzero &= GET_MODE_MASK (ptr_mode);
#endif
      }
      break;

    case ZERO_EXTRACT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
              <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
               ? (nonzero
                  & (((unsigned HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
               : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_SIZE (GET_MODE (x))
                  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          enum machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_BITSIZE (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero =
            cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true =
          cached_nonzero_bits (XEXP (x, 1), mode,
                               known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

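/* As a worked example: for (and:SI (reg:SI 60) (const_int 15)) the AND
   case intersects the register's full SImode mask with 15, so
   nonzero_bits reports 0xf and the value is known to fit in four bits.
   Similarly (lshiftrt:SI (reg:SI 60) (const_int 28)) yields 0xf, since
   the right shift clears the 28 high-order bits.  */
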
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode,
                            const_rtx known_x,
                            enum machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}

/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
                      enum machine_mode known_mode,
                      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_BITSIZE (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
                                         known_x, known_mode, known_ret);
      return MAX (1,
                  num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* If this machine does not do all register operations on the entire
         register and MODE is wider than the mode of X, we can say nothing
         at all about the high-order bits.  */
      return 1;
#else
      /* Likewise on machines that do, if the mode of the object is smaller
         than a word and loads of that size don't sign extend, we can say
         nothing about the high order bits.  */
      if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
          && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
          )
        return 1;
#endif
    }

  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend signed and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be sign bit copies.  */
      if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
          && REG_POINTER (x))
        return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
#endif

      {
        unsigned int copies_for_hook = 1, copies = 1;
        rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
                                                         known_mode, known_ret,
                                                         &copies_for_hook);

        if (new_rtx)
          copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
                                               known_mode, known_ret);

        if (copies > 1 || copies_for_hook > 1)
          return MAX (copies, copies_for_hook);

        /* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
        return MAX (1, ((int) bitwidth
                        - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = INTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
         and we are looking at it in a wider mode, we know that at least the
         high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                             known_x, known_mode, known_ret);
          return MAX ((int) bitwidth
                      - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
                      num0);
        }

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
                                             known_x, known_mode, known_ret);
          return MAX (1, (num0
                          - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
                                   - bitwidth)));
        }

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
         affect the entire register, just look inside.  Note that we are
         passing MODE to the recursive call, so the number of sign bit copies
         will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
         reload for the inner part, it may be loaded from the stack, and
         then we lose all sign bit copies that existed before the store
         to the stack.  */

      if ((GET_MODE_SIZE (GET_MODE (x))
           > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
          && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
          && MEM_P (SUBREG_REG (x)))
        return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                           known_x, known_mode, known_ret);
#endif
#endif
      break;

    case SIGN_EXTRACT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
        return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
              + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                            known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                         known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
                                    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
         of sign bit copies, we can just subtract that amount from the
         number.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < (int) bitwidth)
        {
          num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                             known_x, known_mode, known_ret);
          return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
                                 : (int) bitwidth - INTVAL (XEXP (x, 1))));
        }
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
         is known to be positive, the number of sign bit copies is the
         same as that of the input.  Finally, if the input has just one bit
         that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
        return bitwidth;

      if (num0 > 1
          && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
        num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
         MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
         the other term, we are guaranteed to have at least that many
         high-order zero bits.  */
      if (code == AND
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && !(INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
        return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && (INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
        return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
         if we are subtracting 1 from a positive number, there will not
         be such a carry.  Furthermore, if the positive number is known to
         be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
          && bitwidth <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero = nonzero_bits (XEXP (x, 0), mode);
          if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
            return (nonzero == 1 || nonzero == 0 ? bitwidth
                    : bitwidth - floor_log2 (nonzero) - 1);
        }

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If pointers extend signed and this is an addition or subtraction
         to a pointer in Pmode, all the bits above ptr_mode are known to be
         sign bit copies.  */
      if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && (code == PLUS || code == MINUS)
          && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
        result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
                             - GET_MODE_BITSIZE (ptr_mode) + 1),
                      result);
#endif
      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
         bits of both terms.  However, unless one of the terms is known
         to be positive, we must allow for an additional bit since negating
         a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (((nonzero_bits (XEXP (x, 0), mode)
                    & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
                  && ((nonzero_bits (XEXP (x, 1), mode)
                       & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
        result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
         has the high bit set, we know nothing about the number of sign
         bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
                & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  */
      return cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) > 0)
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT
          || INTVAL (XEXP (x, 1)) < 0
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
        return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
                                         known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_BITSIZE (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
         ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}

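/* As a worked example: for (sign_extend:DI (reg:SI 60)) examined in
   DImode, the SIGN_EXTEND case returns 64 - 32 plus whatever is known
   about the inner register, so at least 33 of the 64 bits are copies
   of the sign bit.  For (ashiftrt:SI (reg:SI 60) (const_int 8)) the
   ASHIFTRT case adds 8 to the count known for register 60.  */
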
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              if (set)
                return 0;
              set = x;
            }
        }
      if (!set)
        return 0;
    }
  else
    return 0;

  cost = rtx_cost (SET_SRC (set), SET, speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}

4634 /* Given an insn INSN and condition COND, return the condition in a
4635 canonical form to simplify testing by callers. Specifically:
4637 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4638 (2) Both operands will be machine operands; (cc0) will have been replaced.
4639 (3) If an operand is a constant, it will be the second operand.
4640 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4641 for GE, GEU, and LEU.
4643 If the condition cannot be understood, or is an inequality floating-point
4644 comparison which needs to be reversed, 0 will be returned.
4646 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
4648 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4649 insn used in locating the condition was found. If a replacement test
4650 of the condition is desired, it should be placed in front of that
4651 insn and we will be sure that the inputs are still valid.
4653 If WANT_REG is nonzero, we wish the condition to be relative to that
4654 register, if possible. Therefore, do not canonicalize the condition
4655 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4656 to be a compare to a CC mode register.
4658 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4662 canonicalize_condition (rtx insn
, rtx cond
, int reverse
, rtx
*earliest
,
4663 rtx want_reg
, int allow_cc_mode
, int valid_at_insn_p
)
4670 int reverse_code
= 0;
4671 enum machine_mode mode
;
4672 basic_block bb
= BLOCK_FOR_INSN (insn
);
4674 code
= GET_CODE (cond
);
4675 mode
= GET_MODE (cond
);
4676 op0
= XEXP (cond
, 0);
4677 op1
= XEXP (cond
, 1);
4680 code
= reversed_comparison_code (cond
, insn
);
4681 if (code
== UNKNOWN
)
4687 /* If we are comparing a register with zero, see if the register is set
4688 in the previous insn to a COMPARE or a comparison operation. Perform
4689 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4692 while ((GET_RTX_CLASS (code
) == RTX_COMPARE
4693 || GET_RTX_CLASS (code
) == RTX_COMM_COMPARE
)
4694 && op1
== CONST0_RTX (GET_MODE (op0
))
4697 /* Set nonzero when we find something of interest. */
4701 /* If comparison with cc0, import actual comparison from compare
4705 if ((prev
= prev_nonnote_insn (prev
)) == 0
4706 || !NONJUMP_INSN_P (prev
)
4707 || (set
= single_set (prev
)) == 0
4708 || SET_DEST (set
) != cc0_rtx
)
4711 op0
= SET_SRC (set
);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks like
	 a comparison.  */
      if (set)
	{
	  enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */

	  if ((GET_CODE (SET_SRC (set)) == COMPARE
	       || (((code == NE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == LT
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     )
		    && COMPARISON_P (SET_SRC (set))))
	      && (((GET_MODE_CLASS (mode) == MODE_CC)
		   == (GET_MODE_CLASS (inner_mode) == MODE_CC))
		  || mode == VOIDmode || inner_mode == VOIDmode))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     )
		    && COMPARISON_P (SET_SRC (set))
		    && (((GET_MODE_CLASS (mode) == MODE_CC)
			 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
			|| mode == VOIDmode || inner_mode == VOIDmode))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((HOST_WIDE_INT) (const_val & max_val)
	      != (((HOST_WIDE_INT) 1
		   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
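
/* A minimal usage sketch (hypothetical, kept out of the build with
   #if 0): how a caller might invoke canonicalize_condition above.
   Assuming COND is (le (reg:QI 70) (const_int 4)), the switch above
   rewrites it to (lt (reg:QI 70) (const_int 5)), since 4 is not the
   largest signed QImode value.  */
#if 0
static void
example_canonicalize (rtx insn, rtx cond)
{
  rtx earliest;
  rtx canon = canonicalize_condition (insn, cond, /*reverse=*/0, &earliest,
				      /*want_reg=*/NULL_RTX,
				      /*allow_cc_mode=*/0,
				      /*valid_at_insn_p=*/1);
  if (canon != 0)
    {
      /* CANON has any constant operand last, and LE/GE/LEU/GEU folded
	 into LT/GT/LTU/GTU where the constant permits.  */
    }
}
#endif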
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
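
/* A minimal usage sketch (hypothetical, kept out of the build with
   #if 0): extracting and inspecting the branch condition of a
   conditional jump, in the style of existing callers.  */
#if 0
static void
example_get_condition (rtx jump)
{
  rtx earliest;
  rtx cond = get_condition (jump, &earliest, /*allow_cc_mode=*/0,
			    /*valid_at_insn_p=*/0);
  if (cond != 0)
    {
      /* COND is a canonical comparison, e.g. (lt (reg:SI 70)
	 (const_int 5)); a replacement test of the condition may be
	 emitted just before EARLIEST.  */
      rtx op0 = XEXP (cond, 0);
      rtx op1 = XEXP (cond, 1);
    }
}
#endif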
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	enum machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    enum machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
	  }
      }
}
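
/* Worked example for the table above, on a hypothetical target where
   targetm.mode_rep_extended returns SIGN_EXTEND for (QImode, HImode)
   and for (HImode, SImode), with 8-bit QImode, 16-bit HImode and
   32-bit SImode.  The inner loop accumulates

     num_sign_bit_copies_in_rep[SImode][QImode]
       = (16 - 8) + (32 - 16) = 24,

   i.e. all 24 bits of an SImode value above QImode's sign bit must be
   sign-bit copies before truncation to QImode is a mode change only.  */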
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
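
/* A minimal usage sketch (hypothetical, kept out of the build with
   #if 0): a simplification routine could consult truncated_to_mode
   before discarding an explicit truncation of an SImode value X.  */
#if 0
static rtx
example_fold_truncate (rtx x)
{
  if (truncated_to_mode (QImode, x))
    /* X already satisfies QImode's representation requirements, so
       the truncation reduces to taking the low part.  */
    return gen_lowpart (QImode, x);
  return NULL_RTX;
}
#endif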
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */
void
init_rtlanal (void)
{
  int i;

  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}
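
/* Worked example for init_rtlanal: PLUS has format "ee", so its first
   rtx operand is at offset 0 and non_rtx_starting_operands[PLUS] is 0;
   CONST_INT has format "w" (no 'e', 'E' or 'V' operand), so its entry
   is -1 and the walk never needs to recurse into a CONST_INT.  */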
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return GET_CODE (x) == CONST_DOUBLE;
}