/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "hard-reg-set.h"
#include "function-abi.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);
static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1, true);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  gcc_checking_assert (i == length);
  vec_safe_push (array.heap, x);
  return array.heap->address ();
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
     is completed, but we need to give at least an estimate for the stack
     pointer based on the frame size.  */
  if (!epilogue_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  poly_int64 const_x1;

  gcc_checking_assert (mode == BLKmode || known_size_p (size));

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : !known_subrange_p (offset, size, 0, decl_size));
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && known_eq (offset, 0))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
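
/* Hedged usage sketch (hypothetical helper, not in the original file):
   the predicate above is normally consulted on the address of a MEM
   before hoisting or speculating the access.  */

static bool ATTRIBUTE_UNUSED
mem_may_trap_p (const_rtx mem)
{
  return MEM_P (mem) && rtx_addr_can_trap_p (XEXP (mem, 0));
}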
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

int
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (const rtx_insn *insn)
{
  rtx x = PATTERN (insn);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}

/* Get the declaration of the function called by INSN.  */

tree
get_call_fndecl (const rtx_insn *insn)
{
  rtx note, datum;

  note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
  if (note == NULL_RTX)
    return NULL_TREE;

  datum = XEXP (note, 0);
  if (datum != NULL_RTX)
    return SYMBOL_REF_DECL (datum);

  return NULL_TREE;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
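
/* Hedged illustration (not in the original file): for
   (const (plus (symbol_ref "x") (const_int 12))), get_integer_term
   returns 12 and get_related_value returns (symbol_ref "x"), so cse.c
   can treat two constants that differ only in their integer term as
   related values.  */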
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
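
/* Hedged usage sketch (hypothetical helper, not in the original file):
   extracting just the numeric part of a constant address with
   split_const; the base is ignored here.  */

static HOST_WIDE_INT ATTRIBUTE_UNUSED
const_term_of (rtx x)
{
  rtx base, offset;
  split_const (x, &base, &offset);
  return INTVAL (offset);
}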
/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}

/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && (insn_callee_abi (as_a<const rtx_insn *> (insn))
                       .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}
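
/* Hedged illustration (not in the original file): on a 32-bit target,
   read_modify_subreg_p is true for (subreg:SI (reg:DI r) 0), since an
   SImode store leaves the other word of R intact, and false for a
   paradoxical subreg such as (subreg:DI (reg:SI r) 0), where the inner
   size is smaller than the outer size.  */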
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Check whether instruction pattern PAT contains a SET with the following
   properties:

   - the SET is executed unconditionally; and
   - either:
     - the destination of the SET is a REG that contains REGNO; or
     - both:
       - the destination of the SET is a SUBREG of such a REG; and
       - writing to the subreg clobbers all of the SUBREG_REG
         (in other words, read_modify_subreg_p is false).

   If PAT does have a SET like that, return the set, otherwise return null.

   This is intended to be an alternative to single_set for passes that
   can handle patterns with multiple_sets.  */
rtx
simple_regno_set (rtx pat, unsigned int regno)
{
  if (GET_CODE (pat) == PARALLEL)
    {
      int last = XVECLEN (pat, 0) - 1;
      for (int i = 0; i < last; ++i)
        if (rtx set = simple_regno_set (XVECEXP (pat, 0, i), regno))
          return set;

      pat = XVECEXP (pat, 0, last);
    }

  if (GET_CODE (pat) == SET
      && covers_regno_no_parallel_p (SET_DEST (pat), regno))
    return pat;

  return nullptr;
}
/* Add all hard registers in X to *PSET.  */

void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */

void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.

   IMPLICIT is true if we should include registers that are fully-clobbered
   by calls.  This should be used with caution, since it doesn't include
   partially-clobbered registers.  */

void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (insn, record_hard_reg_sets, pset);
  if (CALL_P (insn) && implicit)
    *pset |= insn_callee_abi (insn).full_reg_clobbers ();
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
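
/* Hedged usage sketch (hypothetical helper, not in the original file):
   after reload, testing whether INSN writes hard register TEST_REGNO,
   including full call clobbers.  */

static bool ATTRIBUTE_UNUSED
insn_writes_hard_reg_p (const rtx_insn *insn, unsigned int test_regno)
{
  HARD_REG_SET written;
  find_all_hard_reg_sets (insn, &written, true);
  return TEST_HARD_REG_BIT (written, test_regno);
}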
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (set && !set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
      if (GET_MODE (src) != GET_MODE (dst))
        /* It is hard to tell whether subregs refer to the same bits, so act
           conservatively and return 0.  */
        return 0;
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0;
      if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
        return 0;
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        {
          poly_int64 c0i;
          if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
              || maybe_ne (c0i, c0 + i))
            return 0;
        }
      return
        REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
        && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_pattern_stores (const_rtx x,
                     void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_pattern_stores (XVECEXP (x, 0, i), fun, data);
}
/* Same, but for an instruction.  If the instruction is a call, include
   any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */

void
note_stores (const rtx_insn *insn,
             void (*fun) (rtx, const_rtx, void *), void *data)
{
  if (CALL_P (insn))
    for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
         link; link = XEXP (link, 1))
      if (GET_CODE (XEXP (link, 0)) == CLOBBER)
        note_pattern_stores (XEXP (link, 0), fun, data);
  note_pattern_stores (PATTERN (insn), fun, data);
}
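
/* Hedged usage sketch (hypothetical callback, not in the original file):
   counting the stores performed by INSN via note_stores.  */

static void
count_stores_1 (rtx x ATTRIBUTE_UNUSED, const_rtx pat ATTRIBUTE_UNUSED,
                void *data)
{
  ++*(int *) data;
}

static int ATTRIBUTE_UNUSED
count_stores (const rtx_insn *insn)
{
  int count = 0;
  note_stores (insn, count_stores_1, &count);
  return count;
}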
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
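
/* Hedged usage sketch (not in the original file): note_uses pairs
   naturally with record_hard_reg_uses above to collect every hard
   register read by INSN's pattern.  */

static void ATTRIBUTE_UNUSED
collect_hard_reg_uses (rtx_insn *insn, HARD_REG_SET *pset)
{
  CLEAR_HARD_REG_SET (*pset);
  note_uses (&PATTERN (insn), record_hard_reg_uses, pset);
}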
/* Try to add a description of REG X to this object, stopping once
   the REF_END limit has been reached.  FLAGS is a bitmask of
   rtx_obj_reference flags that describe the context.  */

void
rtx_properties::try_to_add_reg (const_rtx x, unsigned int flags)
{
  if (REG_NREGS (x) != 1)
    flags |= rtx_obj_flags::IS_MULTIREG;
  machine_mode mode = GET_MODE (x);
  unsigned int start_regno = REGNO (x);
  unsigned int end_regno = END_REGNO (x);
  for (unsigned int regno = start_regno; regno < end_regno; ++regno)
    if (ref_iter != ref_end)
      *ref_iter++ = rtx_obj_reference (regno, flags, mode,
                                       regno - start_regno);
}
/* Add a description of destination X to this object.  FLAGS is a bitmask
   of rtx_obj_reference flags that describe the context.

   This routine accepts all rtxes that can legitimately appear in a
   SET_DEST.  */

void
rtx_properties::try_to_add_dest (const_rtx x, unsigned int flags)
{
  /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
     each of whose first operand is a register.  */
  if (__builtin_expect (GET_CODE (x) == PARALLEL, 0))
    {
      for (int i = XVECLEN (x, 0) - 1; i >= 0; --i)
        if (rtx dest = XEXP (XVECEXP (x, 0, i), 0))
          try_to_add_dest (dest, flags);
      return;
    }

  unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
  flags |= rtx_obj_flags::IS_WRITE;
  if (GET_CODE (x) == ZERO_EXTRACT)
    {
      try_to_add_src (XEXP (x, 1), base_flags);
      try_to_add_src (XEXP (x, 2), base_flags);
      flags |= rtx_obj_flags::IS_READ;
      x = XEXP (x, 0);
    }
  else if (GET_CODE (x) == STRICT_LOW_PART)
    {
      flags |= rtx_obj_flags::IS_READ;
      x = XEXP (x, 0);
    }
  else if (GET_CODE (x) == SUBREG)
    {
      flags |= rtx_obj_flags::IN_SUBREG;
      if (read_modify_subreg_p (x))
        flags |= rtx_obj_flags::IS_READ;
      x = SUBREG_REG (x);
    }

  if (MEM_P (x))
    {
      if (ref_iter != ref_end)
        *ref_iter++ = rtx_obj_reference (MEM_REGNO, flags, GET_MODE (x));

      unsigned int addr_flags = base_flags | rtx_obj_flags::IN_MEM_STORE;
      if (flags & rtx_obj_flags::IS_READ)
        addr_flags |= rtx_obj_flags::IN_MEM_LOAD;
      try_to_add_src (XEXP (x, 0), addr_flags);
      return;
    }

  if (__builtin_expect (REG_P (x), 1))
    {
      /* We want to keep sp alive everywhere - by making all
         writes to sp also use sp.  */
      if (REGNO (x) == STACK_POINTER_REGNUM)
        flags |= rtx_obj_flags::IS_READ;
      try_to_add_reg (x, flags);
      return;
    }
}
/* Try to add a description of source X to this object, stopping once
   the REF_END limit has been reached.  FLAGS is a bitmask of
   rtx_obj_reference flags that describe the context.

   This routine accepts all rtxes that can legitimately appear in a SET_SRC.  */

void
rtx_properties::try_to_add_src (const_rtx x, unsigned int flags)
{
  unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      rtx_code code = GET_CODE (x);
      if (code == REG)
        try_to_add_reg (x, flags | rtx_obj_flags::IS_READ);
      else if (code == MEM)
        {
          if (MEM_VOLATILE_P (x))
            has_volatile_refs = true;

          if (!MEM_READONLY_P (x) && ref_iter != ref_end)
            {
              auto mem_flags = flags | rtx_obj_flags::IS_READ;
              *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags,
                                               GET_MODE (x));
            }

          try_to_add_src (XEXP (x, 0),
                          base_flags | rtx_obj_flags::IN_MEM_LOAD);
          iter.skip_subrtxes ();
        }
      else if (code == SUBREG)
        {
          try_to_add_src (SUBREG_REG (x), flags | rtx_obj_flags::IN_SUBREG);
          iter.skip_subrtxes ();
        }
      else if (code == UNSPEC_VOLATILE)
        has_volatile_refs = true;
      else if (code == ASM_INPUT || code == ASM_OPERANDS)
        {
          has_asm = true;
          if (MEM_VOLATILE_P (x))
            has_volatile_refs = true;
        }
      else if (code == PRE_INC
               || code == PRE_DEC
               || code == POST_INC
               || code == POST_DEC
               || code == PRE_MODIFY
               || code == POST_MODIFY)
        {
          has_pre_post_modify = true;

          unsigned int addr_flags = (base_flags
                                     | rtx_obj_flags::IS_PRE_POST_MODIFY
                                     | rtx_obj_flags::IS_READ);
          try_to_add_dest (XEXP (x, 0), addr_flags);
          if (code == PRE_MODIFY || code == POST_MODIFY)
            iter.substitute (XEXP (XEXP (x, 1), 1));
          else
            iter.skip_subrtxes ();
        }
      else if (code == CALL)
        has_call = true;
    }
}
/* Try to add a description of instruction pattern PAT to this object,
   stopping once the REF_END limit has been reached.  */

void
rtx_properties::try_to_add_pattern (const_rtx pat)
{
  switch (GET_CODE (pat))
    {
    case COND_EXEC:
      try_to_add_src (COND_EXEC_TEST (pat));
      try_to_add_pattern (COND_EXEC_CODE (pat));
      break;

    case PARALLEL:
      {
        int last = XVECLEN (pat, 0) - 1;
        for (int i = 0; i < last; ++i)
          try_to_add_pattern (XVECEXP (pat, 0, i));
        try_to_add_pattern (XVECEXP (pat, 0, last));
        break;
      }

    case ASM_OPERANDS:
      for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (pat); i < len; ++i)
        try_to_add_src (ASM_OPERANDS_INPUT (pat, i));
      break;

    case CLOBBER:
      try_to_add_dest (XEXP (pat, 0), rtx_obj_flags::IS_CLOBBER);
      break;

    case SET:
      try_to_add_dest (SET_DEST (pat));
      try_to_add_src (SET_SRC (pat));
      break;

    default:
      /* All the other possibilities never store and can use a normal
         rtx walk.  This includes:

         - USE
         - TRAP_IF
         - PREFETCH
         - UNSPEC
         - UNSPEC_VOLATILE.  */
      try_to_add_src (pat);
      break;
    }
}
/* Try to add a description of INSN to this object, stopping once
   the REF_END limit has been reached.  INCLUDE_NOTES is true if the
   description should include REG_EQUAL and REG_EQUIV notes; all such
   references will then be marked with rtx_obj_flags::IN_NOTE.

   For calls, this description includes all accesses in
   CALL_INSN_FUNCTION_USAGE.  It also includes all implicit accesses
   to global registers by the target function.  However, it does not
   include clobbers performed by the target function; callers that want
   this information should instead use the function_abi interface.  */

void
rtx_properties::try_to_add_insn (const rtx_insn *insn, bool include_notes)
{
  if (CALL_P (insn))
    {
      /* Adding the global registers first removes a situation in which
         a fixed-form clobber of register R could come before a real set
         of register R.  */
      if (!hard_reg_set_empty_p (global_reg_set))
        {
          unsigned int flags = (rtx_obj_flags::IS_READ
                                | rtx_obj_flags::IS_WRITE);
          for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
            if (global_regs[regno] && ref_iter != ref_end)
              *ref_iter++ = rtx_obj_reference (regno, flags,
                                               reg_raw_mode[regno], 0);
        }
      if (ref_iter != ref_end && !RTL_CONST_CALL_P (insn))
        {
          auto mem_flags = rtx_obj_flags::IS_READ;
          if (!RTL_PURE_CALL_P (insn))
            mem_flags |= rtx_obj_flags::IS_WRITE;
          *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags, BLKmode);
        }
      try_to_add_pattern (PATTERN (insn));
      for (rtx link = CALL_INSN_FUNCTION_USAGE (insn); link;
           link = XEXP (link, 1))
        {
          rtx x = XEXP (link, 0);
          if (GET_CODE (x) == CLOBBER)
            try_to_add_dest (XEXP (x, 0), rtx_obj_flags::IS_CLOBBER);
          else if (GET_CODE (x) == USE)
            try_to_add_src (XEXP (x, 0));
        }
    }
  else
    try_to_add_pattern (PATTERN (insn));

  if (include_notes)
    for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
      if (REG_NOTE_KIND (note) == REG_EQUAL
          || REG_NOTE_KIND (note) == REG_EQUIV)
        try_to_add_note (XEXP (note, 0));
}
/* Grow the storage by a bit while keeping the contents of the first
   START elements.  */

void
vec_rtx_properties_base::grow (ptrdiff_t start)
{
  /* The same heuristic that vec uses.  */
  ptrdiff_t new_elems = (ref_end - ref_begin) * 3 / 2;
  if (ref_begin == m_storage)
    {
      ref_begin = XNEWVEC (rtx_obj_reference, new_elems);
      if (start)
        memcpy (ref_begin, m_storage, start * sizeof (rtx_obj_reference));
    }
  else
    ref_begin = reinterpret_cast<rtx_obj_reference *>
      (xrealloc (ref_begin, new_elems * sizeof (rtx_obj_reference)));
  ref_iter = ref_begin + start;
  ref_end = ref_begin + new_elems;
}
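
/* Hedged usage sketch (not in the original file), assuming the
   vec_rtx_properties wrapper and its add_insn/refs interface from
   rtlanal.h: scan INSN's references and test whether it reads any
   hard register.  */

static bool ATTRIBUTE_UNUSED
insn_reads_hard_reg_p (const rtx_insn *insn)
{
  vec_rtx_properties properties;
  properties.add_insn (insn, false);
  for (rtx_obj_reference ref : properties.refs ())
    if ((ref.flags & rtx_obj_flags::IS_READ)
        && ref.regno != MEM_REGNO
        && HARD_REGISTER_NUM_P (ref.regno))
      return true;
  return false;
}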
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, is a
   complete rather than read-modify-write destination, and contains
   register TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == kind)
	  return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
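/* Usage sketch (illustrative, not part of GCC): pull the expression out of
   a REG_EQUAL note, if the insn carries one.  */
#if 0
static rtx
example_get_reg_equal (rtx_insn *insn)
{
  /* find_reg_note walks REG_NOTES (insn) and matches on note kind.  */
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  return note ? XEXP (note, 0) : NULL_RTX;
}
#endif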
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
	/* Verify that it is a register, so that scratch and MEM won't cause a
	   problem here.  */
	&& REG_P (XEXP (link, 0))
	&& REGNO (XEXP (link, 0)) <= regno
	&& END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
	|| REG_NOTE_KIND (link) == REG_EQUIV)
      {
	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
	   insns that have multiple sets.  Checking single_set to
	   make sure of this is not the proper check, as explained
	   in the comment in set_unique_reg_note.

	   This should be changed into an assert.  */
	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
	  return 0;
	return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
	return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
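/* Usage sketch (illustrative, not part of GCC): a propagation pass can use
   find_constant_src to decide whether an insn simply materializes a
   constant, whether directly or via an equivalence note.  */
#if 0
static bool
example_loads_constant_p (const rtx_insn *insn)
{
  return find_constant_src (insn) != NULL_RTX;
}
#endif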
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	if (GET_CODE (XEXP (link, 0)) == code
	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
	  return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
	 to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int end_regno = END_REGNO (datum);
	  unsigned int i;

	  for (i = regno; i < end_regno; i++)
	    if (find_regno_fusage (insn, code, i))
	      return 1;
	}
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
	  && REG_P (reg = XEXP (op, 0))
	  && REGNO (reg) <= regno
	  && END_REGNO (reg) > regno)
	return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
	 EXPR_LIST, so that copying is done right and dumps look
	 better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
				       datum, REG_NOTES (insn));
}
/* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */

void
add_args_size_note (rtx_insn *insn, poly_int64 value)
{
  gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
  add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */

rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
	{
	  XEXP (link, 1) = XEXP (note, 1);
	  break;
	}

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   If NO_RESCAN is false and any notes were removed, call
   df_notes_rescan.  Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn, bool no_rescan)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
	{
	  *loc = XEXP (*loc, 1);
	  ret = true;
	}
      else
	loc = &XEXP (*loc, 1);
    }
  if (ret && !no_rescan)
    df_notes_rescan (insn);
  return ret;
}
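/* Usage sketch (illustrative, not part of GCC): when a transformation
   changes the value an insn computes, any cached equivalences become
   stale and must be dropped; passing false requests a df rescan.  */
#if 0
static void
example_invalidate_equivalences (rtx_insn *insn)
{
  remove_reg_equal_equiv_notes (insn, false);
}
#endif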
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
	 or REG_EQUIV note by hacking the list manually rather than calling
	 remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_insn_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_insn_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
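/* The three predicates above form a hierarchy: volatile_insn_p accepts
   only volatile asms and UNSPEC_VOLATILEs, volatile_refs_p additionally
   accepts volatile MEMs, and side_effects_p additionally accepts stores,
   calls and autoincrements.  Usage sketch (illustrative, not part of
   GCC):  */
#if 0
static bool
example_safe_to_duplicate_p (const_rtx x)
{
  /* An expression with any side effect must not be evaluated twice.  */
  return !side_effects_p (x);
}
#endif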
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

static int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      /* Division by a non-constant might trap.  */
      if (HONOR_SNANS (x))
	return 1;
      if (FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
	{
	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
	  unsigned int n_elts;
	  rtx op = XEXP (x, 1);
	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
	    {
	      if (!CONST_VECTOR_DUPLICATE_P (op))
		return 1;
	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
		  return 1;
	    }
	  else
	    for (unsigned i = 0; i < n_elts; i++)
	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
		return 1;
	}
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
    case VEC_MERGE:
    case VEC_SELECT:
    case VEC_CONCAT:
    case VEC_DUPLICATE:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
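/* Usage sketch (illustrative, not part of GCC): code hoisting must use the
   stronger predicate, since moving an access out of its guarding context
   can turn a valid access into a faulting one.  */
#if 0
static bool
example_can_hoist_p (const_rtx x)
{
  return !may_trap_or_fault_p (x) && !side_effects_p (x);
}
#endif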
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}
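/* Usage sketch (illustrative, not part of GCC): substitute a new base
   register into an address.  replace_rtx modifies X in place, so callers
   copy first when X may be shared.  */
#if 0
static rtx
example_rebase_address (rtx addr, rtx old_base, rtx new_base)
{
  return replace_rtx (copy_rtx (addr), old_base, new_base, false);
}
#endif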
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}
/* Replace occurrences of OLD_LABEL in INSN with NEW_LABEL, tracking
   LABEL_NUSES as for replace_label.  */

void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
		       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && label_ref_label (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
	     rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a<rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a<rtx_jump_table_data *> (table);
  return true;
}
/* For INSN known to satisfy tablejump_p, determine if it actually is a
   CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */

rtx
tablejump_casesi_pattern (const rtx_insn *insn)
{
  rtx tmp;

  if ((tmp = single_set (insn)) != NULL
      && SET_DEST (tmp) == pc_rtx
      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
      && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
    return tmp;

  return NULL_RTX;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_POLY_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_POLY_INT)
	return -5;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
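/* Usage sketch (illustrative, not part of GCC): canonicalize a commutative
   operation so that the higher-precedence operand comes first; constants
   therefore always end up second.  */
#if 0
static void
example_canonicalize_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    std::swap (*op0, *op1);
}
#endif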
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Reinterpret a subreg as a bit extraction from an integer and return
   the position of the least significant bit of the extracted value.
   In other words, if the extraction were performed as a shift right
   and mask, return the number of bits to shift right.

   The outer value of the subreg has OUTER_BYTES bytes and starts at
   byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */

poly_uint64
subreg_size_lsb (poly_uint64 outer_bytes,
		 poly_uint64 inner_bytes,
		 poly_uint64 subreg_byte)
{
  poly_uint64 subreg_end, trailing_bytes, byte_pos;

  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (subreg_byte, 0U));
      return 0;
    }

  subreg_end = subreg_byte + outer_bytes;
  trailing_bytes = inner_bytes - subreg_end;
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    byte_pos = trailing_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    byte_pos = subreg_byte;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
	 to split offsets into words and bytes at compile time.  */
      poly_uint64 leading_word_part
	= force_align_down (subreg_byte, UNITS_PER_WORD);
      poly_uint64 trailing_word_part
	= force_align_down (trailing_bytes, UNITS_PER_WORD);
      /* If the subreg crosses a word boundary ensure that
	 it also begins and ends on a word boundary.  */
      gcc_assert (known_le (subreg_end - leading_word_part,
			    (unsigned int) UNITS_PER_WORD)
		  || (known_eq (leading_word_part, subreg_byte)
		      && known_eq (trailing_word_part, trailing_bytes)));
      if (WORDS_BIG_ENDIAN)
	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
      else
	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
    }

  return byte_pos * BITS_PER_UNIT;
}
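/* Worked example (explanatory, not upstream commentary): with a 4-byte
   outer value, an 8-byte inner value and subreg_byte 4, a little-endian
   target (neither WORDS_BIG_ENDIAN nor BYTES_BIG_ENDIAN) gives
   byte_pos = subreg_byte = 4, i.e. a shift of 32 bits, while a fully
   big-endian target gives byte_pos = trailing_bytes = 8 - (4 + 4) = 0,
   i.e. a shift of 0 bits.  */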
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

poly_uint64
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

poly_uint64
subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
			     poly_uint64 lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (lsb_shift, 0U));
      return 0;
    }

  poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
  poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
	 to split offsets into words and bytes at compile time.  */
      poly_uint64 lower_word_part = force_align_down (lower_bytes,
						      UNITS_PER_WORD);
      poly_uint64 upper_word_part = force_align_down (upper_bytes,
						      UNITS_PER_WORD);
      if (WORDS_BIG_ENDIAN)
	return upper_word_part + (lower_bytes - lower_word_part);
      else
	return lower_word_part + (upper_bytes - upper_word_part);
    }
}
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
   the new register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 poly_uint64 offset, machine_mode ymode,
		 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  poly_uint64 xsize = GET_MODE_SIZE (xmode);
  poly_uint64 ysize = GET_MODE_SIZE (ymode);

  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      /* As a consequence, we must be dealing with a constant number of
	 scalars, and thus a constant offset and number of units.  */
      HOST_WIDE_INT coffset = offset.to_constant ();
      HOST_WIDE_INT cysize = ysize.to_constant ();
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
      scalar_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (nunits
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs (xregno, xmode)
		  == hard_regno_nregs (xregno, xmode_unit) * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
	  && (coffset / GET_MODE_SIZE (xmode_unit)
	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs (xregno, xmode);

  nregs_ymode = hard_regno_nregs (xregno, ymode);

  /* Subreg sizes must be ordered, so that we can tell whether they are
     partial, paradoxical or complete.  */
  gcc_checking_assert (ordered_p (xsize, ysize));

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.

	 We assume that the ordering of registers within a multi-register
	 value has a consistent endianness: if bytes and register words
	 have different endianness, the hard registers that make up a
	 multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
	info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  poly_uint64 regsize_xmode, regsize_ymode;
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && multiple_p (xsize, nregs_xmode, &regsize_xmode)
      && multiple_p (ysize, nregs_ymode, &regsize_ymode))
    {
      if (!rknown
	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
	{
	  info->representable_p = false;
	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
	    /* Checked by validate_subreg.  We must know at compile time
	       which inner registers are being accessed.  */
	    gcc_unreachable ();
	  return;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown && maybe_gt (ysize + offset, xsize))
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
	    /* Checked by validate_subreg.  We must know at compile time
	       which inner registers are being accessed.  */
	    gcc_unreachable ();
	  return;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      HOST_WIDE_INT count;
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && known_eq (regsize_xmode, regsize_ymode)
	  && constant_multiple_p (offset, regsize_ymode, &count))
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = count;
	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
    {
      info->representable_p = true;
      rknown = true;

      if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number;
  poly_uint64 subblock_offset;
  if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
			&subblock_offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner registers are being accessed.  */
    gcc_unreachable ();

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
	= known_eq (subblock_offset,
		    subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
    return -1;

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 possible.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!targetm.hard_regno_mode_ok (yregno, ymode)
      && targetm.hard_regno_mode_ok (xregno, xmode))
    return -1;

  return (int) yregno;
}
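/* For example, on a little-endian target where a DImode value occupies a
   pair of SImode hard registers, (subreg:SI (reg:DI 10) 4) typically
   simplifies to register 11: subreg_get_info reports offset 1 and the
   result is XREGNO + 1, provided the target allows SImode in that
   register.  (Illustrative; register numbering is target-specific.)  */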
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
4365 /* Helper function for noticing stores to parameter registers. */
4367 parms_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
4369 struct parms_set_data
*const d
= (struct parms_set_data
*) data
;
4370 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
4371 && TEST_HARD_REG_BIT (d
->regs
, REGNO (x
)))
4373 CLEAR_HARD_REG_BIT (d
->regs
, REGNO (x
));
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0))
	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care of
	 possible labels by setting BOUNDARY to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (before, parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;
  unsigned mode_size;

  if (x == 0)
    return 0;

  if (GET_CODE (x) == SET)
    /* A SET doesn't have a mode, so let's look at the SET_DEST to get
       the mode for the factor.  */
    mode = GET_MODE (SET_DEST (x));
  else if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  mode_size = estimated_poly_value (GET_MODE_SIZE (mode));

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
	{
	  total = 0;
	  break;
	}
      /* FALLTHRU */
    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
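/* Usage sketch (illustrative, not part of GCC): compare two candidate
   expressions by speed cost, treating each as operand 1 of a SET.  */
#if 0
static bool
example_cheaper_p (rtx a, rtx b, machine_mode mode)
{
  return (rtx_cost (a, mode, SET, 1, true)
	  < rtx_cost (b, mode, SET, 1, true));
}
#endif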
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is properly formed address reference.

   SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}
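/* For example, if X is (and:SI (reg:SI 100) (const_int 255)),
   nonzero_bits returns 0xff: whatever the register holds, all bits
   above the low byte are known to be zero.  */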
unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return 1;
  return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
}
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;

    default:
      return false;
    }
}
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4764 /* Given an expression, X, compute which bits in X can be nonzero.
4765 We don't care about bits outside of those defined in MODE.
4767 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4768 an arithmetic operation, we can do better. */
static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code = GET_CODE (x);
  machine_mode inner_mode;
  unsigned int inner_width;
  scalar_int_mode xmode;

  unsigned int mode_width = GET_MODE_PRECISION (mode);

  if (CONST_INT_P (x))
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);
    }

  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return nonzero;
  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* If X is wider than MODE, use its mode instead.  */
  if (xmode_width > mode_width)
    {
      mode = xmode;
      nonzero = GET_MODE_MASK (mode);
      mode_width = xmode_width;
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the object
     might be nonzero in its own mode, taking into account the fact that, on
     CISC machines, accessing an object in a wider mode generally causes the
     high-order bits to become undefined, so they are not known to be zero.
     We extend this reasoning to RISC machines for operations that might not
     operate on the full registers.  */
  if (mode_width > xmode_width
      && xmode_width <= BITS_PER_WORD
      && xmode_width <= HOST_BITS_PER_WIDE_INT
      && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
    {
      nonzero &= cached_nonzero_bits (x, xmode,
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
      return nonzero;
    }
  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED
	  && xmode == Pmode
	  && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    {
	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
	      alignment = MIN (known_alignment (rounded_1), alignment);
	    }
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case MEM:
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (load_extend_op (xmode) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (xmode);
      break;
    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (xmode) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;
    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
	nonzero = 1;
#endif

      if (xmode_width < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;
    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;
    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;
    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;
    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = xmode_width - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = ctz_or_zero (nz0);
	int low1 = ctz_or_zero (nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;
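
      /* Worked illustration (added commentary): for (plus:SI A B) with
	 nonzero_bits (A) <= 0xff and nonzero_bits (B) <= 0xff, WIDTH0 and
	 WIDTH1 are both 8, so RESULT_WIDTH is 9 and the sum is known to
	 fit in the low 9 bits; the extra bit allows for the carry out of
	 bit 7.  */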
    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;
    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (xmode)
		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
					 known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
	  && inner_width <= BITS_PER_WORD
	  && inner_width <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

	  /* On a typical CISC machine, accessing an object in a wider mode
	     causes the high-order bits to become undefined.  So they are
	     not known to be zero.

	     On a typical RISC machine, we only have to worry about the way
	     loads are extended.  Otherwise, if we get a reload for the inner
	     part, it may be loaded from the stack, and then we may lose all
	     the zero bits that existed before the store to the stack.  */
	  rtx_code extend_op;
	  if ((!WORD_REGISTER_OPERATIONS
	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
		   ? val_signbit_known_set_p (inner_mode, nonzero)
		   : extend_op != ZERO_EXTEND)
	       || !MEM_P (SUBREG_REG (x)))
	      && xmode_width > inner_width)
	    nonzero
	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
	}
      break;
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in xmode are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	{
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > xmode_width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  switch (code)
	    {
	    case ASHIFT:
	      inner <<= count;
	      break;

	    case LSHIFTRT:
	      inner >>= count;
	      break;

	    case ASHIFTRT:
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
		inner |= (((HOST_WIDE_INT_1U << count) - 1)
			  << (xmode_width - count));
	      break;

	    case ROTATE:
	      inner = (inner << (count % xmode_width)
		       | (inner >> (xmode_width - (count % xmode_width))))
		      & mode_mask;
	      break;

	    case ROTATERT:
	      inner = (inner >> (count % xmode_width)
		       | (inner << (xmode_width - (count % xmode_width))))
		      & mode_mask;
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  nonzero &= (outer | inner);
	}
      break;
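
      /* Worked illustration (added commentary): for
	 (lshiftrt:SI X (const_int 24)), INNER is shifted right by 24, so
	 at most the low 8 bits of the result can be nonzero, whatever was
	 known about X.  */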
    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
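
/* Worked illustration (added commentary): nonzero_bits of
   (and:SI (reg:SI 100) (const_int 0xff)) is at most 0xff: the AND case
   intersects the masks of the two operands, and the CONST_INT operand
   contributes exactly 0xff.  */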
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
			    const_rtx known_x, machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE.  The returned
   value will always be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		      machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  if (CONST_INT_P (x))
    {
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    }

  scalar_int_mode xmode, inner_mode;
  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return 1;
  unsigned int xmode_width = GET_MODE_PRECISION (xmode);
  /* For a smaller mode, just ignore the high bits.  */
  if (bitwidth < xmode_width)
    {
      num0 = cached_num_sign_bit_copies (x, xmode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - (int) (xmode_width - bitwidth));
    }

  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  We extend this reasoning to RISC
	 machines for operations that might not operate on full registers.  */
      if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.

	     This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && MEM_P (SUBREG_REG (x)))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;
    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
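
/* Worked illustration (added commentary): in SImode, (const_int -4) is
   ...11111100 in binary, so the CONST_INT case above yields
   32 - floor_log2 (3) - 1 = 30 sign bit copies; for
   (sign_extend:DI (reg:SI R)) the SIGN_EXTEND case yields at least
   64 - 32 + 1 = 33 copies.  */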
/* Calculate the rtx_cost of a single instruction pattern.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
pattern_cost (rtx pat, bool speed)
{
  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  rtx set = NULL_RTX;
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  int cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
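
/* Worked illustration (added commentary): for a pattern like
   (parallel [(set (reg:SI 0) (plus:SI ...))
	      (set (reg:CC flags) (compare:CC ...))])
   the loop above picks the non-COMPARE SET, so the returned cost is that
   of the PLUS, which is the likely cost of the whole insn.  */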
/* Calculate the cost of a single instruction.  A return value of zero
   indicates an instruction pattern without a known cost.  */

int
insn_cost (rtx_insn *insn, bool speed)
{
  if (targetm.insn_cost)
    return targetm.insn_cost (insn, speed);

  return pattern_cost (PATTERN (insn), speed);
}
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else if (NONDEBUG_INSN_P (seq))
	{
	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
	  if (this_cost > 0)
	    cost += this_cost;
	  else
	    cost++;
	}
    }

  return cost;
}
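
/* Typical use (illustrative sketch, added commentary): compare candidate
   expansions by cost, e.g.

     start_sequence ();
     ... emit one candidate expansion ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     unsigned cost = seq_cost (seq, optimize_insn_for_speed_p ());  */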
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  /* We promised to return a comparison.  */
  rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  if (COMPARISON_P (ret))
    return ret;
  return 0;
}
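
/* Worked illustration (added commentary): with a 32-bit OP0_MODE,
   (le (reg) (const_int 4)) is canonicalized to (lt (reg) (const_int 5))
   and (geu (reg) (const_int 1)) to (gtu (reg) (const_int 0)), per rule
   (4) above; a condition such as (gt (const_int 0) (reg)) first has its
   constant moved to the second operand by swap_condition, giving
   (lt (reg) (const_int 0)).  */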
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */
rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (!HWI_COMPUTABLE_MODE_P (mode))
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
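
/* Worked illustration (added commentary): in SImode, low_bitmask_len
   returns 8 for M = 0xff (0xff + 1 is a power of 2) and -1 for M = 0xf0,
   which selects a field that does not start at bit 0.  */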
/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
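
/* Worked illustration (added commentary): on a 32-bit-word, little-endian
   target with a 64-bit HOST_WIDE_INT, splitting (const_int 0x100000002)
   stores (const_int 2) in *FIRST (the low word, first in memory) and
   (const_int 1) in *SECOND.  */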
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
      poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;

      return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	{
	  if (outer_code)
	    *outer_code = code;
	  return loc;
	}
    }
}
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
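
/* Worked illustration (added commentary): for
   *LOC = (plus (plus (reg A) (reg B)) (const_int 4)) the recursion
   flattens the tree left-to-right, storing pointers to (reg A), (reg B)
   and (const_int 4) in the array.  */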
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
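
/* Worked illustration (added commentary): decomposing
   (mem:SI (plus:SI (mult:SI (reg:SI idx) (const_int 4))
		    (plus:SI (reg:SI base) (const_int 8))))
   classifies (reg:SI idx) as the index term (scaled by 4),
   (reg:SI base) as the base term and (const_int 8) as the
   displacement.  */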
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
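
/* Worked illustration (added commentary): get_index_scale returns 4 both
   for (mult (reg) (const_int 4)) and for (ashift (reg) (const_int 2)),
   and 1 when the index term is the bare register.  */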
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}

/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}

/* Return true if RTL X contains a constant pool address.  */

bool
contains_constant_pool_address_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
      return true;

  return false;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}
/* Process recursively X of INSN and add REG_INC notes if necessary.  */
void
add_auto_inc_notes (rtx_insn *insn, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (code == MEM && auto_inc_p (XEXP (x, 0)))
    {
      add_reg_note (insn, REG_INC, XEXP (XEXP (x, 0), 0));
      return;
    }

  /* Scan all X sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_auto_inc_notes (insn, XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_auto_inc_notes (insn, XVECEXP (x, i, j));
    }
}
/* Return true if X is register asm.  */

bool
register_asm_p (const_rtx x)
{
  return (REG_P (x)
	  && REG_EXPR (x) != NULL_TREE
	  && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (x))
	  && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (x))
	  && DECL_REGISTER (REG_EXPR (x)));
}