/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "hard-reg-set.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      /* Add to the stack while there's still room.  */
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }

      /* Otherwise start a new heap vector.  Copy the stack values across.  */
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }

  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         handling below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}
template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
     is completed, but we need to give at least an estimate for the stack
     pointer based on the frame size.  */
  if (!epilogue_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode || known_size_p (size));
  poly_int64 const_x1;

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : !known_subrange_p (offset, size, 0, decl_size));
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && known_eq (offset, 0))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
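/* Usage sketch (illustrative, not from the original file): a caller that
   has a whole MEM rather than a bare address can give the _1 variant more
   context, roughly mirroring how may_trap_p_1 below evaluates MEMs:

     int may_trap
       = rtx_addr_can_trap_p_1 (XEXP (mem, 0), 0,
                                MEM_SIZE_KNOWN_P (mem) ? MEM_SIZE (mem) : -1,
                                GET_MODE (mem), false);

   Passing the mode and size enables the bounds and alignment checks that
   the BLKmode/-1 wrapper above deliberately forgoes.  */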
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return false;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return true;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return true;
      }
  return false;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (const rtx_insn *insn)
{
  rtx x = PATTERN (insn);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
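/* Usage sketch (illustrative, not from the original file): given the
   canonical form (const (plus (symbol_ref "foo") (const_int 12))),
   split_const stores (symbol_ref "foo") in *BASE_OUT and (const_int 12)
   in *OFFSET_OUT; for any other X it stores X itself and const0_rtx.
   A typical caller looks like:

     rtx base, offset;
     split_const (addr, &base, &offset);
     if (GET_CODE (base) == SYMBOL_REF)
       ... use INTVAL (offset) ...
*/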
/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case CLOBBER_HIGH:
      gcc_assert (REG_P (XEXP (body, 0)));
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
1396 preserve some of its SUBREG_REG. For example, on a normal 32-bit
1397 target, using a SUBREG to store to one half of a DImode REG would
1398 preserve the other half. */
1401 read_modify_subreg_p (const_rtx x
)
1403 if (GET_CODE (x
) != SUBREG
)
1405 poly_uint64 isize
= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)));
1406 poly_uint64 osize
= GET_MODE_SIZE (GET_MODE (x
));
1407 poly_uint64 regsize
= REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x
)));
1408 /* The inner and outer modes of a subreg must be ordered, so that we
1409 can tell whether they're paradoxical or partial. */
1410 gcc_checking_assert (ordered_p (isize
, osize
));
1411 return (maybe_gt (isize
, osize
) && maybe_gt (isize
, regsize
));
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (GET_CODE (pat) == CLOBBER_HIGH
          && REGNO (data->pat) == REGNO (XEXP (pat, 0))
          && reg_is_clobbered_by_clobber_high (data->pat, XEXP (pat, 0)))
      || (GET_CODE (pat) != CLOBBER_HIGH && !MEM_P (x)
          && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}
/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (insn, record_hard_reg_sets, pset);
  if (CALL_P (insn) && implicit)
    *pset |= call_used_or_fixed_regs;
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
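/* Usage sketch (illustrative, not from the original file): collecting
   every hard register written by INSN, including the implicit
   call-clobbered set for calls:

     HARD_REG_SET written;
     find_all_hard_reg_sets (insn, &written, true);
     if (TEST_HARD_REG_BIT (written, REGNO (some_reg)))
       ...

   Here some_reg stands for whatever hard register the caller cares
   about.  */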
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
            case CLOBBER_HIGH:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common case
                 only a single set is present in the pattern, so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a set for the first time, we just expect it
                 to be the single set we are looking for; only when more
                 sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
          return 0;
      return
        REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
        && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
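/* Illustrative example (not from the original file): a plain register
   self-copy such as

     (set (reg:SI 100) (reg:SI 100))

   satisfies set_noop_p, and so does a VEC_SELECT that reads back exactly
   the hard-register lanes it writes, per the consecutive-lane check
   above.  */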
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER
              || GET_CODE (tem) == CLOBBER_HIGH)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case CLOBBER:
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_pattern_stores (const_rtx x,
                     void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET
      || GET_CODE (x) == CLOBBER
      || GET_CODE (x) == CLOBBER_HIGH)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_pattern_stores (XVECEXP (x, 0, i), fun, data);
}
/* Same, but for an instruction.  If the instruction is a call, include
   any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */

void
note_stores (const rtx_insn *insn,
             void (*fun) (rtx, const_rtx, void *), void *data)
{
  if (CALL_P (insn))
    for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
         link; link = XEXP (link, 1))
      if (GET_CODE (XEXP (link, 0)) == CLOBBER)
        note_pattern_stores (XEXP (link, 0), fun, data);
  note_pattern_stores (PATTERN (insn), fun, data);
}
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, is a
   complete rather than read-modify-write destination, and contains
   register TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
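/* Usage sketch (illustrative, not from the original file): checking
   whether INSN carries a REG_EQUAL note and extracting its value:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       {
         rtx val = XEXP (note, 0);
         ...
       }

   Passing NULL_RTX as DATUM matches any note of the given kind.  */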
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */

void
add_args_size_note (rtx_insn *insn, poly_int64 value)
{
  gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
  add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
}
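/* Usage sketch (illustrative, not from the original file): attaching a
   REG_EQUAL note that records the value an insn computes, e.g. after
   emitting a multi-insn constant load:

     add_reg_note (insn, REG_EQUAL, GEN_INT (0x12345678));

   Integer-valued kinds such as REG_BR_PROB must instead go through
   add_int_reg_note, per the int_reg_note_p split above.  */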
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */
rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}

/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF: case SYMBOL_REF: case CONST:
    CASE_CONST_ANY:
    case CC0: case PC: case REG: case SCRATCH: case CLOBBER:
    case ADDR_VEC: case ADDR_DIFF_VEC: case CALL: case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF: case SYMBOL_REF: case CONST:
    CASE_CONST_ANY:
    case CC0: case PC: case REG: case SCRATCH: case CLOBBER:
    case ADDR_VEC: case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

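/* Illustrative contrast between the two predicates above: for
   X = (set (reg:SI r) (mem/v:SI (reg:SI a))), volatile_insn_p (X)
   returns 0, because a volatile memory *reference* is neither a
   volatile asm nor an UNSPEC_VOLATILE, while volatile_refs_p (X)
   returns 1 because of the volatile MEM.  */
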
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF: case SYMBOL_REF: case CONST:
    CASE_CONST_ANY:
    case CC0: case PC: case REG: case SCRATCH:
    case ADDR_VEC: case ADDR_DIFF_VEC: case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET: case CALL:
    case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
    case POST_MODIFY: case PRE_MODIFY:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
        return 1;
      if (FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
        {
          /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
          unsigned int n_elts;
          rtx op = XEXP (x, 1);
          if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
            {
              if (!CONST_VECTOR_DUPLICATE_P (op))
                return 1;
              for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
                if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
                  return 1;
            }
          else
            for (unsigned i = 0; i < n_elts; i++)
              if (CONST_VECTOR_ELT (op, i) == const0_rtx)
                return 1;
        }
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}

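/* For example, (div:SI (reg:SI a) (reg:SI b)) may trap because the
   divisor might be zero, whereas (div:SI (reg:SI a) (const_int 4))
   may not.  Likewise a plain (mem:SI (reg:SI p)) is considered
   trapping unless MEM_NOTRAP_P is set and the context is unchanged.  */
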
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}

/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG: case SCRATCH: case PC: case CC0:
    CASE_CONST_ANY:
    case CONST: case LABEL_REF: case SYMBOL_REF:
      return 0;

    case LT: case LTU: case GT: case GTU:
    case LE: case LEU: case GE: case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}

/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
                                           from, to, all_regs);
    }

  return x;
}

/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant
                     pool are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
                       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}

/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF
            && LABEL_P (x)
            && label_ref_label (y) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}

/* If INSN is a tablejump return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
             rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a<rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a<rtx_jump_table_data *> (table);
  return true;
}

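/* Schematically, the insn stream shape recognized above is:

     (jump_insn ... (set (pc) ...))    ;; JUMP_LABEL points to LABEL
     (code_label LABEL)
     (jump_table_data (addr_vec ...))  ;; or (addr_diff_vec ...)

   i.e. the label returned in *LABELP immediately precedes the jump
   table returned in *TABLEP.  */
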
/* For INSN known to satisfy tablejump_p, determine if it actually is a
   CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */

rtx
tablejump_casesi_pattern (const rtx_insn *insn)
{
  rtx tmp;

  if ((tmp = single_set (insn)) != NULL
      && SET_DEST (tmp) == pc_rtx
      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
      && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
    return tmp;

  return NULL_RTX;
}

/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}

/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

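/* For instance, for (mem:SI (post_inc:SI (reg:SI r))) on a target where
   SImode is 4 bytes, the call made above is
   fn (mem, x, r, r, (const_int 4), data): adding the last two operands
   gives the value R holds afterwards.  For a PRE_MODIFY or POST_MODIFY
   address such as (pre_modify (reg r) (plus (reg r) (reg s))), the new
   value is the PLUS expression itself, so the constant argument is NULL.  */
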
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;
          iter.skip_subrtxes ();
        }
    }
  return 0;
}

/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_POLY_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -7;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_POLY_INT)
        return -5;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}

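/* For example, constants have the lowest precedence, so
   swap_commutative_operands_p ((const_int 1), (reg)) is true and the
   canonical form is (plus (reg) (const_int 1)).  Likewise commutative
   subexpressions outrank unary ones, keeping
   (and (and (reg) (reg)) (not (reg))) in canonical order.  */
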
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC: case POST_INC:
    case PRE_DEC: case POST_DEC:
    case PRE_MODIFY: case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}

/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

poly_uint64
subreg_lsb_1 (machine_mode outer_mode,
              machine_mode inner_mode,
              poly_uint64 subreg_byte)
{
  poly_uint64 subreg_end, trailing_bytes, byte_pos;

  /* A paradoxical subreg begins at bit position 0.  */
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    return 0;

  subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
  trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    byte_pos = trailing_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    byte_pos = subreg_byte;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
         to split offsets into words and bytes at compile time.  */
      poly_uint64 leading_word_part
        = force_align_down (subreg_byte, UNITS_PER_WORD);
      poly_uint64 trailing_word_part
        = force_align_down (trailing_bytes, UNITS_PER_WORD);
      /* If the subreg crosses a word boundary ensure that
         it also begins and ends on a word boundary.  */
      gcc_assert (known_le (subreg_end - leading_word_part,
                            (unsigned int) UNITS_PER_WORD)
                  || (known_eq (leading_word_part, subreg_byte)
                      && known_eq (trailing_word_part, trailing_bytes)));
      if (WORDS_BIG_ENDIAN)
        byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
      else
        byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
    }

  return byte_pos * BITS_PER_UNIT;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

poly_uint64
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}

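/* Worked example, assuming !BYTES_BIG_ENDIAN and !WORDS_BIG_ENDIAN:
   for (subreg:SI (reg:DI r) 4) the subreg begins at bit
   4 * BITS_PER_UNIT = 32, i.e. the high half of the DImode value.
   With both macros set (a fully big-endian target), the same byte
   offset maps to bit 0.  */
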
/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

poly_uint64
subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
                             poly_uint64 lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (lsb_shift, 0U));
      return 0;
    }

  poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
  poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
         to split offsets into words and bytes at compile time.  */
      poly_uint64 lower_word_part = force_align_down (lower_bytes,
                                                      UNITS_PER_WORD);
      poly_uint64 upper_word_part = force_align_down (upper_bytes,
                                                      UNITS_PER_WORD);
      if (WORDS_BIG_ENDIAN)
        return upper_word_part + (lower_bytes - lower_word_part);
      else
        return lower_word_part + (upper_bytes - upper_word_part);
    }
}

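/* A sketch of the inverse relationship, assuming a fully little-endian
   target where SImode is 4 bytes and DImode is 8:

     subreg_size_offset_from_lsb (4, 8, 32) == 4
     subreg_lsb_1 (SImode, DImode, 4) == 32

   byte offset 4 and bit shift 32 both describe the SImode highpart of
   a DImode value.  */
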
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
   the new register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
                 poly_uint64 offset, machine_mode ymode,
                 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  poly_uint64 xsize = GET_MODE_SIZE (xmode);
  poly_uint64 ysize = GET_MODE_SIZE (ymode);

  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      /* As a consequence, we must be dealing with a constant number of
         scalars, and thus a constant offset and number of units.  */
      HOST_WIDE_INT coffset = offset.to_constant ();
      HOST_WIDE_INT cysize = ysize.to_constant ();
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
      scalar_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (nunits
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs (xregno, xmode)
                  == hard_regno_nregs (xregno, xmode_unit) * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
          && (coffset / GET_MODE_SIZE (xmode_unit)
              != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs (xregno, xmode);

  nregs_ymode = hard_regno_nregs (xregno, ymode);

  /* Subreg sizes must be ordered, so that we can tell whether they are
     partial, paradoxical or complete.  */
  gcc_checking_assert (ordered_p (xsize, ysize));

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.

         We assume that the ordering of registers within a multi-register
         value has a consistent endianness: if bytes and register words
         have different endianness, the hard registers that make up a
         multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
        info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  poly_uint64 regsize_xmode, regsize_ymode;
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && multiple_p (xsize, nregs_xmode, &regsize_xmode)
      && multiple_p (ysize, nregs_ymode, &regsize_ymode))
    {
      if (!rknown
          && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
              || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
        {
          info->representable_p = false;
          if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
              || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
            /* Checked by validate_subreg.  We must know at compile time
               which inner registers are being accessed.  */
            gcc_unreachable ();
          return;
        }
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
         would go outside of XMODE.  */
      if (!rknown && maybe_gt (ysize + offset, xsize))
        {
          info->representable_p = false;
          info->nregs = nregs_ymode;
          if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
            /* Checked by validate_subreg.  We must know at compile time
               which inner registers are being accessed.  */
            gcc_unreachable ();
          return;
        }
      /* Quick exit for the simple and common case of extracting whole
         subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
         if we can generalize the concept enough and figure out how
         odd-sized modes can coexist with the other weird cases we support.  */
      HOST_WIDE_INT count;
      if (!rknown
          && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
          && known_eq (regsize_xmode, regsize_ymode)
          && constant_multiple_p (offset, regsize_ymode, &count))
        {
          info->representable_p = true;
          info->nregs = nregs_ymode;
          info->offset = count;
          gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
    {
      info->representable_p = true;
      rknown = true;

      if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number;
  poly_uint64 subblock_offset;
  if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
                        &subblock_offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner registers are being accessed.  */
    gcc_unreachable ();

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
        = known_eq (subblock_offset,
                    subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}

/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
                     poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

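/* Worked example, assuming 4-byte words, !REG_WORDS_BIG_ENDIAN, and a
   target where DImode needs two hard registers: for
   (subreg:SI (reg:DI R) 4), subreg_regno_offset (R, DImode, 4, SImode)
   is 1, so the subreg resolves to hard register R + 1.  */
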
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
                               poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}

/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
                       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!targetm.hard_regno_mode_ok (yregno, ymode)
      && targetm.hard_regno_mode_ok (xregno, xmode))
    return -1;

  return (int) yregno;
}

/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}

struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0))
        && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller must either ensure that we will find all sets
         (in case code has not been optimized yet), or take care of
         possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (before, parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}

/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const rtx_insn *i2
            = next_nonnote_insn (const_cast<rtx_insn *> (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}

/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
          int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
        {
          total = 0;
          break;
        }
      /* FALLTHRU */
    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}

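/* A sketch of the default scaling above: on a 32-bit target
   (UNITS_PER_WORD == 4), DImode gives factor == 2, so a DImode MULT
   defaults to 2 * 2 * COSTS_N_INSNS (5) unless targetm.rtx_costs
   overrides it, while a word-sized operation defaults to
   COSTS_N_INSNS (1).  */
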
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}

/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed
   or size should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}

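/* For example, nonzero_bits on (and:SI (reg:SI r) (const_int 255))
   returns 0xff: whatever R holds, the AND clears all higher bits.
   A typical use is testing whether a value is known to fit in a
   narrower mode:

     if ((nonzero_bits (x, mode) & ~GET_MODE_MASK (QImode)) == 0)
       ...  */
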
unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return 1;
  return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
}

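/* For example, num_sign_bit_copies on (sign_extend:SI (reg:QI r)) in
   SImode is at least 25: the 24 extended bits plus the QImode sign bit
   itself are all copies of the sign.  A result equal to the mode
   precision means the value is known to be either 0 or -1.  */
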
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND: case XOR: case IOR:
    case UMIN: case UMAX: case SMIN: case SMAX:
    case PLUS: case MINUS: case MULT:
    case DIV: case UDIV: case MOD: case UMOD:
      return true;
    default:
      return false;
    }
}

/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
                     machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

4483 /* Given an expression, X, compute which bits in X can be nonzero.
4484 We don't care about bits outside of those defined in MODE.
4486 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4487 an arithmetic operation, we can do better. */
4489 static unsigned HOST_WIDE_INT
4490 nonzero_bits1 (const_rtx x
, scalar_int_mode mode
, const_rtx known_x
,
4491 machine_mode known_mode
,
4492 unsigned HOST_WIDE_INT known_ret
)
4494 unsigned HOST_WIDE_INT nonzero
= GET_MODE_MASK (mode
);
4495 unsigned HOST_WIDE_INT inner_nz
;
4496 enum rtx_code code
= GET_CODE (x
);
4497 machine_mode inner_mode
;
4498 unsigned int inner_width
;
4499 scalar_int_mode xmode
;
4501 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
4503 if (CONST_INT_P (x
))
4505 if (SHORT_IMMEDIATES_SIGN_EXTEND
4507 && mode_width
< BITS_PER_WORD
4508 && (UINTVAL (x
) & (HOST_WIDE_INT_1U
<< (mode_width
- 1))) != 0)
4509 return UINTVAL (x
) | (HOST_WIDE_INT_M1U
<< mode_width
);
4514 if (!is_a
<scalar_int_mode
> (GET_MODE (x
), &xmode
))
4516 unsigned int xmode_width
= GET_MODE_PRECISION (xmode
);
4518 /* If X is wider than MODE, use its mode instead. */
4519 if (xmode_width
> mode_width
)
4522 nonzero
= GET_MODE_MASK (mode
);
4523 mode_width
= xmode_width
;
4526 if (mode_width
> HOST_BITS_PER_WIDE_INT
)
4527 /* Our only callers in this case look for single bit values. So
4528 just return the mode mask. Those tests will then be false. */
4531 /* If MODE is wider than X, but both are a single word for both the host
4532 and target machines, we can compute this from which bits of the object
4533 might be nonzero in its own mode, taking into account the fact that, on
4534 CISC machines, accessing an object in a wider mode generally causes the
4535 high-order bits to become undefined, so they are not known to be zero.
4536 We extend this reasoning to RISC machines for operations that might not
4537 operate on the full registers. */
4538 if (mode_width
> xmode_width
4539 && xmode_width
<= BITS_PER_WORD
4540 && xmode_width
<= HOST_BITS_PER_WIDE_INT
4541 && !(WORD_REGISTER_OPERATIONS
&& word_register_operation_p (x
)))
4543 nonzero
&= cached_nonzero_bits (x
, xmode
,
4544 known_x
, known_mode
, known_ret
);
4545 nonzero
|= GET_MODE_MASK (mode
) & ~GET_MODE_MASK (xmode
);
4549 /* Please keep nonzero_bits_binary_arith_p above in sync with
4550 the code in the switch below. */
4554 #if defined(POINTERS_EXTEND_UNSIGNED)
4555 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4556 all the bits above ptr_mode are known to be zero. */
4557 /* As we do not know which address space the pointer is referring to,
4558 we can do this only if the target does not support different pointer
4559 or address modes depending on the address space. */
4560 if (target_default_pointer_address_modes_p ()
4561 && POINTERS_EXTEND_UNSIGNED
4564 && !targetm
.have_ptr_extend ())
4565 nonzero
&= GET_MODE_MASK (ptr_mode
);
4568 /* Include declared information about alignment of pointers. */
4569 /* ??? We don't properly preserve REG_POINTER changes across
4570 pointer-to-integer casts, so we can't trust it except for
4571 things that we know must be pointers. See execute/960116-1.c. */
4572 if ((x
== stack_pointer_rtx
4573 || x
== frame_pointer_rtx
4574 || x
== arg_pointer_rtx
)
4575 && REGNO_POINTER_ALIGN (REGNO (x
)))
4577 unsigned HOST_WIDE_INT alignment
4578 = REGNO_POINTER_ALIGN (REGNO (x
)) / BITS_PER_UNIT
;
4580 #ifdef PUSH_ROUNDING
4581 /* If PUSH_ROUNDING is defined, it is possible for the
4582 stack to be momentarily aligned only to that amount,
4583 so we pick the least alignment. */
4584 if (x
== stack_pointer_rtx
&& PUSH_ARGS
)
4586 poly_uint64 rounded_1
= PUSH_ROUNDING (poly_int64 (1));
4587 alignment
= MIN (known_alignment (rounded_1
), alignment
);
4591 nonzero
&= ~(alignment
- 1);
4595 unsigned HOST_WIDE_INT nonzero_for_hook
= nonzero
;
4596 rtx new_rtx
= rtl_hooks
.reg_nonzero_bits (x
, xmode
, mode
,
4600 nonzero_for_hook
&= cached_nonzero_bits (new_rtx
, mode
, known_x
,
4601 known_mode
, known_ret
);
4603 return nonzero_for_hook
;
4607 /* In many, if not most, RISC machines, reading a byte from memory
4608 zeros the rest of the register. Noticing that fact saves a lot
4609 of extra zero-extends. */
4610 if (load_extend_op (xmode
) == ZERO_EXTEND
)
4611 nonzero
&= GET_MODE_MASK (xmode
);
4615 case UNEQ
: case LTGT
:
4616 case GT
: case GTU
: case UNGT
:
4617 case LT
: case LTU
: case UNLT
:
4618 case GE
: case GEU
: case UNGE
:
4619 case LE
: case LEU
: case UNLE
:
4620 case UNORDERED
: case ORDERED
:
4621 /* If this produces an integer result, we know which bits are set.
4622 Code here used to clear bits outside the mode of X, but that is
4624 /* Mind that MODE is the mode the caller wants to look at this
4625 operation in, and not the actual operation mode. We can wind
4626 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4627 that describes the results of a vector compare. */
4628 if (GET_MODE_CLASS (xmode
) == MODE_INT
4629 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
4630 nonzero
= STORE_FLAG_VALUE
;
4635 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4636 and num_sign_bit_copies. */
4637 if (num_sign_bit_copies (XEXP (x
, 0), xmode
) == xmode_width
)
4641 if (xmode_width
< mode_width
)
4642 nonzero
|= (GET_MODE_MASK (mode
) & ~GET_MODE_MASK (xmode
));
4647 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4648 and num_sign_bit_copies. */
4649 if (num_sign_bit_copies (XEXP (x
, 0), xmode
) == xmode_width
)
4655 nonzero
&= (cached_nonzero_bits (XEXP (x
, 0), mode
,
4656 known_x
, known_mode
, known_ret
)
4657 & GET_MODE_MASK (mode
));
4661 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4662 known_x
, known_mode
, known_ret
);
4663 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4664 nonzero
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4668 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4669 Otherwise, show all the bits in the outer mode but not the inner
4671 inner_nz
= cached_nonzero_bits (XEXP (x
, 0), mode
,
4672 known_x
, known_mode
, known_ret
);
4673 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4675 inner_nz
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4676 if (val_signbit_known_set_p (GET_MODE (XEXP (x
, 0)), inner_nz
))
4677 inner_nz
|= (GET_MODE_MASK (mode
)
4678 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0))));
4681 nonzero
&= inner_nz
;
4685 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4686 known_x
, known_mode
, known_ret
)
4687 & cached_nonzero_bits (XEXP (x
, 1), mode
,
4688 known_x
, known_mode
, known_ret
);
4692 case UMIN
: case UMAX
: case SMIN
: case SMAX
:
4694 unsigned HOST_WIDE_INT nonzero0
4695 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4696 known_x
, known_mode
, known_ret
);
4698 /* Don't call nonzero_bits for the second time if it cannot change
4700 if ((nonzero
& nonzero0
) != nonzero
)
4702 | cached_nonzero_bits (XEXP (x
, 1), mode
,
4703 known_x
, known_mode
, known_ret
);
4707 case PLUS
: case MINUS
:
4709 case DIV
: case UDIV
:
4710 case MOD
: case UMOD
:
4711 /* We can apply the rules of arithmetic to compute the number of
4712 high- and low-order zero bits of these operations. We start by
4713 computing the width (position of the highest-order nonzero bit)
4714 and the number of low-order zero bits for each value. */
4716 unsigned HOST_WIDE_INT nz0
4717 = cached_nonzero_bits (XEXP (x
, 0), mode
,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = xmode_width - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = ctz_or_zero (nz0);
	int low1 = ctz_or_zero (nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (xmode)
		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
					 known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
	  && inner_width <= BITS_PER_WORD
	  && inner_width <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

	  /* On a typical CISC machine, accessing an object in a wider mode
	     causes the high-order bits to become undefined.  So they are
	     not known to be zero.

	     On a typical RISC machine, we only have to worry about the way
	     loads are extended.  Otherwise, if we get a reload for the inner
	     part, it may be loaded from the stack, and then we may lose all
	     the zero bits that existed before the store to the stack.  */
	  rtx_code extend_op;
	  if ((!WORD_REGISTER_OPERATIONS
	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
		   ? val_signbit_known_set_p (inner_mode, nonzero)
		   : extend_op != ZERO_EXTEND)
	       || !MEM_P (SUBREG_REG (x)))
	      && xmode_width > inner_width)
	    nonzero
	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
	}
      break;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in xmode are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	{
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > xmode_width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  switch (code)
	    {
	    case ASHIFT:
	      inner <<= count;
	      break;

	    case LSHIFTRT:
	      inner >>= count;
	      break;

	    case ASHIFTRT:
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
		inner |= (((HOST_WIDE_INT_1U << count) - 1)
			  << (xmode_width - count));
	      break;

	    case ROTATE:
	      inner = (inner << (count % xmode_width)
		       | (inner >> (xmode_width - (count % xmode_width))))
		      & mode_mask;
	      break;

	    case ROTATERT:
	      inner = (inner >> (count % xmode_width)
		       | (inner << (xmode_width - (count % xmode_width))))
		      & mode_mask;
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
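/* Illustrative sketch added for exposition; not part of the original file.
   It spells out the PLUS and MULT rules of the arithmetic switch above on
   concrete values: if A is known to fit in 4 bits (nonzero mask 0x0f) and
   B in 2 bits (mask 0x03), then A + B fits in max (4, 2) + 1 = 5 bits and
   A * B in 4 + 2 = 6 bits.  */
#if 0
static void
nonzero_bits_arith_example (void)
{
  unsigned HOST_WIDE_INT nz0 = 0x0f, nz1 = 0x03;
  int width0 = floor_log2 (nz0) + 1;	/* 4 */
  int width1 = floor_log2 (nz1) + 1;	/* 2 */

  /* PLUS: one extra bit for the carry.  */
  unsigned HOST_WIDE_INT plus_mask
    = (HOST_WIDE_INT_1U << (MAX (width0, width1) + 1)) - 1;	/* 0x1f */

  /* MULT: the widths add.  */
  unsigned HOST_WIDE_INT mult_mask
    = (HOST_WIDE_INT_1U << (width0 + width1)) - 1;		/* 0x3f */

  gcc_checking_assert (plus_mask == 0x1f && mult_mask == 0x3f);
}
#endif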
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
			    const_rtx known_x, machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
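/* Illustrative sketch added for exposition; not part of the original file.
   Without the wrapper, an expression such as (mult (plus a a) (plus a a))
   would make num_sign_bit_copies1 recurse into each identical (plus a a)
   operand separately, doubling the work at every level; the wrapper
   computes the shared subexpression once and replays the answer through
   KNOWN_X/KNOWN_RET.  */
#if 0
static unsigned int
sign_bit_copies_entry_example (rtx x, scalar_int_mode mode)
{
  /* A fresh top-level query: no precomputed subexpression yet.  */
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
#endif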
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE.  The returned
   value will always be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		      machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  if (CONST_INT_P (x))
    {
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    }

  scalar_int_mode xmode, inner_mode;
  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return 1;

  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* For a smaller mode, just ignore the high bits.  */
  if (bitwidth < xmode_width)
    {
      num0 = cached_num_sign_bit_copies (x, xmode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - (int) (xmode_width - bitwidth));
    }

  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  We extend this reasoning to RISC
	 machines for operations that might not operate on full registers.  */
      if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 known_x, known_mode,
							 known_ret,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.

	     This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && MEM_P (SUBREG_REG (x)))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
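/* Illustrative sketch added for exposition; not part of the original file.
   It applies the CONST_INT rule above to a concrete value: in a 16-bit
   mode, 0xfff0 (-16) has the sign bit set, so we complement to 0x000f;
   floor_log2 gives 3, and 16 - 3 - 1 = 12 top bits are sign-bit copies
   (-16 needs only 5 bits of two's-complement representation).  */
#if 0
static void
sign_bit_copies_const_example (void)
{
  unsigned int bitwidth = 16;
  unsigned HOST_WIDE_INT nonzero = 0xfff0;
  if (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1)))
    nonzero = (~nonzero) & 0xffff;	/* 0x000f */
  unsigned int copies
    = (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
  gcc_checking_assert (copies == 12);
}
#endif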
/* Calculate the rtx_cost of a single instruction pattern.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
pattern_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
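/* Illustrative sketch added for exposition; not part of the original file.
   For a simple (set (reg) (plus (reg) (reg))) pattern, pattern_cost boils
   down to the set_src_cost of the PLUS, with COSTS_N_INSNS (1) as the
   fallback when the target reports no cost.  */
#if 0
static int
pattern_cost_example (rtx_insn *insn)
{
  /* Same as insn_cost below when the target provides no insn_cost hook.  */
  return pattern_cost (PATTERN (insn), /*speed=*/true);
}
#endif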
/* Calculate the cost of a single instruction.  A return value of zero
   indicates an instruction pattern without a known cost.  */

int
insn_cost (rtx_insn *insn, bool speed)
{
  if (targetm.insn_cost)
    return targetm.insn_cost (insn, speed);

  return pattern_cost (PATTERN (insn), speed);
}

/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else if (NONDEBUG_INSN_P (seq))
	{
	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
	  if (this_cost > 0)
	    cost += this_cost;
	  else
	    cost++;
	}
    }

  return cost;
}
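/* Illustrative sketch added for exposition; not part of the original file.
   The usual pattern for costing a candidate expansion is to emit it into a
   temporary sequence and ask seq_cost for an estimate.  */
#if 0
static unsigned
cost_of_candidate_sequence (bool speed)
{
  start_sequence ();
  /* ... emit the candidate insns here ... */
  rtx_insn *seq = get_insns ();
  end_sequence ();
  return seq_cost (seq, speed);
}
#endif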
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  /* We promised to return a comparison.  */
  rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  if (COMPARISON_P (ret))
    return ret;
  return 0;
}
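/* Illustrative sketch added for exposition; not part of the original file.
   It replays rule (4) above on concrete numbers: for a QImode operand,
   (le x (const_int 4)) becomes (lt x (const_int 5)), and the guard in the
   LE case keeps the +1 adjustment from wrapping at the mode's maximum.  */
#if 0
static void
canonicalize_le_example (void)
{
  /* The LE case above, with const_val == 4 in an 8-bit mode:
     4 != (0xff >> 1) == 127, so LE becomes LT with op1 = 5.  */
  HOST_WIDE_INT const_val = 4;
  unsigned HOST_WIDE_INT max_val = 0xff;
  gcc_checking_assert ((unsigned HOST_WIDE_INT) const_val != max_val >> 1);
  gcc_checking_assert (const_val + 1 == 5);	/* new LT bound */
}
#endif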
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
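/* Illustrative sketch added for exposition; not part of the original file.
   A typical caller asks for a condition valid at the jump itself by
   passing a null EARLIEST and a nonzero VALID_AT_INSN_P.  */
#if 0
static rtx
branch_condition_example (rtx_insn *jump)
{
  return get_condition (jump, NULL, /*allow_cc_mode=*/0,
			/*valid_at_insn_p=*/1);
}
#endif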
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}

/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
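/* Illustrative sketch added for exposition; not part of the original file.
   On a target whose mode_rep_extended (SImode, DImode) hook returns
   SIGN_EXTEND (MIPS64 is the classic example), the loop above records 32
   in num_sign_bit_copies_in_rep[DImode][SImode], so truncated_to_mode
   requires at least 32 + 1 sign-bit copies before an SImode truncation of
   a DImode value becomes a no-op.  */
#if 0
static bool
truncation_is_free_example (rtx x)
{
  /* True if X, in its own (wider) mode, already satisfies the
     representation requirements of SImode.  */
  return truncated_to_mode (SImode, x);
}
#endif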
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}

/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}

/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (!HWI_COMPUTABLE_MODE_P (mode))
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
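/* Illustrative sketch added for exposition; not part of the original file.
   low_bitmask_len reports a width only for masks of contiguous low-order
   bits: exact_log2 (m + 1) is nonnegative precisely when m + 1 is a power
   of two, i.e. when m looks like 0b0...01...1.  */
#if 0
static void
low_bitmask_len_example (void)
{
  gcc_checking_assert (low_bitmask_len (SImode, 0xff) == 8);	/* low field */
  gcc_checking_assert (low_bitmask_len (SImode, 0xf0) == -1);	/* not low */
}
#endif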
/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}

/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
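/* Illustrative sketch added for exposition; not part of the original file.
   On a 32-bit-word little-endian target with a 64-bit HOST_WIDE_INT,
   splitting the CONST_INT 0x0000000100000002 yields the low word 2 first
   in memory and the high word 1 second; a big-endian target gets them the
   other way round.  */
#if 0
static void
split_double_example (void)
{
  rtx first, second;
  split_double (GEN_INT (0x0000000100000002), &first, &second);
  if (WORDS_BIG_ENDIAN)
    gcc_checking_assert (INTVAL (first) == 1 && INTVAL (second) == 2);
  else
    gcc_checking_assert (INTVAL (first) == 2 && INTVAL (second) == 1);
}
#endif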
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
      poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;

      return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
    }
  return false;
}

/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
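/* Illustrative sketch added for exposition; not part of the original file.
   Given an address such as (and:SI (subreg:SI (plus:DI r d) 0)
   (const_int -4)), the loop above strips the alignment AND and the lowpart
   SUBREG and returns a pointer to the (plus:DI r d), reporting AND as the
   innermost stripped code.  */
#if 0
static rtx *
strip_example (rtx *addr_loc)
{
  enum rtx_code outer = UNKNOWN;
  rtx *inner = strip_address_mutations (addr_loc, &outer);
  /* *inner is now the unmutated core of the address.  */
  return inner;
}
#endif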
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
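/* Illustrative sketch added for exposition; not part of the original file.
   Decomposing the address of (mem:SI (plus:SI (mult:SI r1 (const_int 4))
   r0)) classifies r0 as the base and r1 as the index term, and
   get_index_scale then reports 4.  */
#if 0
static HOST_WIDE_INT
index_scale_example (rtx mem)
{
  struct address_info info;
  decompose_mem_address (&info, mem);
  return info.index ? get_index_scale (&info) : 0;
}
#endif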
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}

/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}

/* Return true if RTL X contains a constant pool address.  */

bool
contains_constant_pool_address_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
      return true;

  return false;
}


/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}
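/* Illustrative sketch added for exposition; not part of the original file.
   The same FOR_EACH_SUBRTX pattern as the predicates above, used to count
   rather than merely detect SYMBOL_REFs.  */
#if 0
static unsigned int
count_symbol_refs (const_rtx x)
{
  unsigned int n = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      n++;
  return n;
}
#endif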
/* Return true if reg REGNO with mode REG_MODE would be clobbered by the
   clobber_high operand in CLOBBER_HIGH_OP.  */

bool
reg_is_clobbered_by_clobber_high (unsigned int regno, machine_mode reg_mode,
				  const_rtx clobber_high_op)
{
  unsigned int clobber_regno = REGNO (clobber_high_op);
  machine_mode clobber_mode = GET_MODE (clobber_high_op);
  unsigned char regno_nregs = hard_regno_nregs (regno, reg_mode);

  /* Clobber high should always span exactly one register.  */
  gcc_assert (REG_NREGS (clobber_high_op) == 1);

  /* Clobber high needs to match with one of the registers in X.  */
  if (clobber_regno < regno
      || clobber_regno >= regno + regno_nregs)
    return false;

  gcc_assert (reg_mode != BLKmode && clobber_mode != BLKmode);

  if (reg_mode == VOIDmode)
    return clobber_mode != VOIDmode;

  /* Clobber high will clobber if its size might be greater than the size of
     register regno.  */
  return maybe_gt (exact_div (GET_MODE_SIZE (reg_mode), regno_nregs),
		   GET_MODE_SIZE (clobber_mode));
}