/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
#include "hard-reg-set.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
						   const_rtx, machine_mode,
						   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
					     const_rtx, machine_mode,
					     unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
						const_rtx, machine_mode,
						unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
					  const_rtx, machine_mode,
					  unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
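
/* As an illustrative sketch (a hypothetical MIPS-like target, not any
   particular port): if SImode values are kept sign-extended in DImode
   registers, i.e. TARGET_MODE_REP_EXTENDED (SImode, DImode) is
   SIGN_EXTEND, then truncating DImode to SImode needs
   NUM_SIGN_BIT_COPIES_IN_REP[DImode][SImode] == 33: bit 31 and all
   32 bits above it must be copies of the sign bit (33 copies counting
   the sign bit itself) before the DImode value can simply be
   relabelled as SImode.  */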
\f
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
						  value_type *base,
						  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
	{
	  base[i] = x;
	  return base;
	}
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
	 heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
	vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}

/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
						    value_type *base,
						    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
	 we're likely to want most.  It also allows for the SEQUENCE
	 code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
	if (format[i] == 'e')
	  {
	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	    if (__builtin_expect (end < LOCAL_ELEMS, true))
	      base[end++] = subx;
	    else
	      base = add_single_to_queue (array, base, end++, subx);
	  }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
	{
	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	  if (__builtin_expect (end < LOCAL_ELEMS, true))
	    base[end++] = subx;
	  else
	    base = add_single_to_queue (array, base, end++, subx);
	}
      else if (format[i] == 'E')
	{
	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
	    for (unsigned int j = 0; j < length; j++)
	      base[end++] = T::get_value (vec[j]);
	  else
	    for (unsigned int j = 0; j < length; j++)
	      base = add_single_to_queue (array, base, end++,
					  T::get_value (vec[j]));
	  if (code == SEQUENCE && end == length)
	    /* If the subrtxes of the sequence fill the entire array then
	       we know that no other parts of a containing insn are queued.
	       The caller is therefore iterating over the sequence as a
	       PATTERN (...), so we also want the patterns of the
	       subinstructions.  */
	    for (unsigned int j = 0; j < length; j++)
	      {
		typename T::rtx_type x = T::get_rtx (base[j]);
		if (INSN_P (x))
		  base[j] = T::get_value (PATTERN (x));
	      }
	}
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
	 that must happen after a call.  This currently screws up local-alloc
	 into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
	return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_unstable_p (XEXP (x, i)))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_unstable_p (XVECEXP (x, i, j)))
	    return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      if (x == pic_offset_table_rtx
	  /* ??? When call-clobbered, the value is stable modulo the restore
	     that must happen after a call.  This currently screws up
	     local-alloc into believing that the restore is not needed, so we
	     must return 0 only if we are called from alias analysis.  */
	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
	return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
	 (in fact it is related specifically to operand 1)
	 during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
	     || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }

  return 0;
}

/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimate of the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
	return offset1;
      else if (from == STACK_POINTER_REGNUM)
	return - offset1;
      else
	return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
	if (table[i].to == to)
	  {
	    INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					offset1);
	    return offset1;
	  }
	for (j = 0; j < ARRAY_SIZE (table); j++)
	  {
	    if (table[j].to == to
		&& table[j].from == table[i].to)
	      {
		INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					    offset1);
		INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					    offset2);
		return offset1 + offset2;
	      }
	    if (table[j].from == to
		&& table[j].to == table[i].to)
	      {
		INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					    offset1);
		INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					    offset2);
		return offset1 - offset2;
	      }
	  }
      }
    else if (table[i].to == from)
      {
	if (table[i].from == to)
	  {
	    INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					offset1);
	    return - offset1;
	  }
	for (j = 0; j < ARRAY_SIZE (table); j++)
	  {
	    if (table[j].to == to
		&& table[j].from == table[i].from)
	      {
		INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					    offset1);
		INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					    offset2);
		return - offset1 + offset2;
	      }
	    if (table[j].from == to
		&& table[j].to == table[i].from)
	      {
		INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					    offset1);
		INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					    offset2);
		return - offset1 - offset2;
	      }
	  }
      }

  /* If the requested register combination was not found,
     try a simpler combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
		       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode || known_size_p (size));
  poly_int64 const_x1;

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
	 the real alignment of %sp.  However, when it does this, the
	 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
	actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
	return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
	return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
	{
	  tree decl;
	  poly_int64 decl_size;

	  if (maybe_lt (offset, 0))
	    return 1;
	  if (!known_size_p (size))
	    return maybe_ne (offset, 0);

	  /* If the size of the access or of the symbol is unknown,
	     assume the worst.  */
	  decl = SYMBOL_REF_DECL (x);

	  /* Else check that the access is in bounds.  TODO: restructure
	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
	  if (!decl)
	    decl_size = -1;
	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
	    {
	      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
		decl_size = -1;
	    }
	  else if (TREE_CODE (decl) == STRING_CST)
	    decl_size = TREE_STRING_LENGTH (decl);
	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
	  else
	    decl_size = -1;

	  return (!known_size_p (decl_size) || known_eq (decl_size, 0)
		  ? maybe_ne (offset, 0)
		  : maybe_gt (offset + size, decl_size));
	}

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
	 nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	{
#ifdef RED_ZONE_SIZE
	  poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
	  poly_int64 red_zone_size = 0;
#endif
	  poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
	  poly_int64 low_bound, high_bound;

	  if (!known_size_p (size))
	    return 1;

	  if (x == frame_pointer_rtx)
	    {
	      if (FRAME_GROWS_DOWNWARD)
		{
		  high_bound = targetm.starting_frame_offset ();
		  low_bound  = high_bound - get_frame_size ();
		}
	      else
		{
		  low_bound  = targetm.starting_frame_offset ();
		  high_bound = low_bound + get_frame_size ();
		}
	    }
	  else if (x == hard_frame_pointer_rtx)
	    {
	      poly_int64 sp_offset
		= get_initial_register_offset (STACK_POINTER_REGNUM,
					       HARD_FRAME_POINTER_REGNUM);
	      poly_int64 ap_offset
		= get_initial_register_offset (ARG_POINTER_REGNUM,
					       HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
	      low_bound  = sp_offset - red_zone_size - stack_boundary;
	      high_bound = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
			   + crtl->args.size
#endif
			   + stack_boundary;
#else
	      high_bound = sp_offset + red_zone_size + stack_boundary;
	      low_bound  = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
			   - crtl->args.size
#endif
			   - stack_boundary;
#endif
	    }
	  else if (x == stack_pointer_rtx)
	    {
	      poly_int64 ap_offset
		= get_initial_register_offset (ARG_POINTER_REGNUM,
					       STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
	      low_bound  = - red_zone_size - stack_boundary;
	      high_bound = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
			   + crtl->args.size
#endif
			   + stack_boundary;
#else
	      high_bound = red_zone_size + stack_boundary;
	      low_bound  = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
			   - crtl->args.size
#endif
			   - stack_boundary;
#endif
	    }
	  else
	    {
	      /* We assume that accesses are safe to at least the
		 next stack boundary.
		 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
			   + stack_boundary;
	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
			   - crtl->args.size - stack_boundary;
#else
	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
			   - stack_boundary;
	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
			   + crtl->args.size + stack_boundary;
#endif
	    }

	  if (known_ge (offset, low_bound)
	      && known_le (offset, high_bound - size))
	    return 0;
	  return 1;
	}
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
	 - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
	  && GET_CODE (XEXP (x, 1)) == CONST
	  && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
	  && known_eq (offset, 0))
	return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
				     size, mode, unaligned_mems))
	return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
				    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}

/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
	  && CONSTANT_P (XEXP (x, 1)))
	return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
	 auto-inc is only allowed in memories, the register must be a
	 pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0)
	return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }
  return 0;
}
\f
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
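
/* As a sketch of the shapes handled above (hypothetical insns, not taken
   from any port):

     (call_insn (call (mem (symbol_ref "f")) ...))
     (call_insn (set (reg 0) (call (mem (symbol_ref "f")) ...)))
     (call_insn (parallel [(set (reg 0) (call (mem ...) ...)) ...]))

   all yield the inner CALL rtx, while a pattern whose (first parallel
   element's) source is not a CALL of a MEM yields NULL_RTX.  */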
\f
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
	   && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
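
/* A small illustration of the two helpers above (hypothetical rtxes):

     get_integer_term ((const (plus (symbol_ref "x") (const_int 8))))  ==  8
     get_integer_term ((const (minus (symbol_ref "x") (const_int 8)))) == -8
     get_related_value ((const (plus (symbol_ref "x") (const_int 8))))
       == (symbol_ref "x")

   while anything without an obvious integer term yields 0.  */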
\f
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
	return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
	return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
	{
	  *base_out = XEXP (x, 0);
	  *offset_out = XEXP (x, 1);
	  return;
	}
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
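
/* For instance (a sketch):

     split_const ((const (plus (symbol_ref "s") (const_int 12))),
		  &base, &off)

   stores (symbol_ref "s") in BASE and (const_int 12) in OFF, whereas
   splitting a plain (reg 100) stores the REG itself and const0_rtx.  */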

/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
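
/* A sketch of the difference from split_const: strip_offset also looks
   through a bare PLUS, so

     strip_offset ((plus (reg 100) (const_int 16)), &off)

   returns (reg 100) with OFF == 16, while an rtx with no constant term
   comes back unchanged with OFF == 0.  */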

/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
\f
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
	count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
	return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
	return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
	{
	case 'e':
	  count += count_occurrences (XEXP (x, i), find, count_dest);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
	  break;
	}
    }
  return count;
}

\f
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
	 and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && reg_mentioned_p (reg, XEXP (in, i)))
	return 1;
    }
  return 0;
}
\f
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
		    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
	    || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
	return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn references X if
	 it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
	  && GET_CODE (SET_DEST (body)) != PC
	  && !REG_P (SET_DEST (body))
	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (body)))
		&& !read_modify_subreg_p (SET_DEST (body)))
	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
	return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
	  return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
	  return 1;
      return 0;

    case CLOBBER_HIGH:
      gcc_assert (REG_P (XEXP (body, 0)));
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
	return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
		   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
	  return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
	  || (CALL_P (insn)
	      && ((REG_P (reg)
		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
					       GET_MODE (reg), REGNO (reg)))
		  || MEM_P (reg)
		  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
	{
	  rtx mem = *iter;
	  if (mem
	      && MEM_P (mem)
	      && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	    {
	      if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
		return true;
	      iter.skip_subrtxes ();
	    }
	}
    }

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
	if (memory_modified_in_insn_p (x, insn))
	  return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_between_p (XVECEXP (x, i, j), start, end))
	    return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      if (memory_modified_in_insn_p (x, insn))
	return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_in_p (XVECEXP (x, i, j), insn))
	    return 1;
    }

  return 0;
}

/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}
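
/* For example (assuming a 32-bit target whose REGMODE_NATURAL_SIZE is
   4 bytes): (subreg:SI (reg:DI 100) 0) is a read-modify-write
   destination, since storing to it preserves the other SImode half of
   the DImode value; on a 64-bit target with an 8-byte natural size the
   same subreg is not, because the store replaces the whole register.  */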
\f
/* Helper function for set_of.  */
struct set_of_data
{
  const_rtx found;
  const_rtx pat;
};

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (GET_CODE (pat) == CLOBBER_HIGH
	  && REGNO (data->pat) == REGNO (XEXP (pat, 0))
	  && reg_is_clobbered_by_clobber_high (data->pat, XEXP (pat, 0)))
      || (GET_CODE (pat) != CLOBBER_HIGH && !MEM_P (x)
	  && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
	IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx sub = XVECEXP (pat, 0, i);
	  switch (GET_CODE (sub))
	    {
	    case USE:
	    case CLOBBER:
	    case CLOBBER_HIGH:
	      break;

	    case SET:
	      /* We can consider insns having multiple sets, where all
		 but one are dead, as single set insns.  In the common
		 case only a single set is present in the pattern, so we
		 want to avoid checking for REG_UNUSED notes unless
		 necessary.

		 When we reach a set the first time, we just expect it to
		 be the single set we are looking for; only when more
		 sets are found in the insn do we check them.  */
	      if (!set_verified)
		{
		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
		      && !side_effects_p (set))
		    set = NULL;
		  else
		    set_verified = 1;
		}
	      if (!set)
		set = sub, set_verified = 0;
	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
		       || side_effects_p (sub))
		return NULL_RTX;
	      break;

	    default:
	      return NULL_RTX;
	    }
	}
    }
  return set;
}
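
/* As an illustration (a hypothetical pattern), an insn whose pattern is

     (parallel [(set (reg 100) (plus (reg 101) (reg 102)))
		(clobber (reg:CC 17))])

   still counts as a "single set": the CLOBBER is skipped and the SET is
   returned.  Two live SETs in one PARALLEL would yield NULL_RTX.  */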

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  {
	    /* If we have already found a SET, then return now.  */
	    if (found)
	      return 1;
	    else
	      found = 1;
	  }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
	   && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
	return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
	if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
	  return 0;
      return
	REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
	&& simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
				  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
	  && REGNO (src) == REGNO (dst));
}
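
/* A couple of sketched examples:

     (set (reg:SI 5) (reg:SI 5))            -- a no-op
     (set (reg:SI 5) (reg:SI 6))            -- not a no-op
     (set (mem (reg 100)) (mem (reg 100)))  -- a no-op, provided the MEMs
					       are identical and free of
					       side effects.  */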
\f
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
	 this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx tem = XVECEXP (pat, 0, i);

	  if (GET_CODE (tem) == USE
	      || GET_CODE (tem) == CLOBBER
	      || GET_CODE (tem) == CLOBBER_HIGH)
	    continue;

	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
	    return 0;
	}

      return 1;
    }
  return 0;
}
\f

/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
		   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
	 clobber a virtual register.  In fact, we could be more precise,
	 but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	       && x_regno == ARG_POINTER_REGNUM)
	   || x_regno == FRAME_POINTER_REGNUM)
	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
	return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
	 registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int inner_regno = subreg_regno (x);
	  unsigned int inner_endregno
	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
			     ? subreg_nregs (x) : 1);

	  return endregno > inner_regno && regno < inner_endregno;
	}
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
	  /* Note setting a SUBREG counts as referring to the REG it is in for
	     a pseudo but not for hard registers since we can
	     treat each word individually.  */
	  && ((GET_CODE (SET_DEST (x)) == SUBREG
	       && loc != &SUBREG_REG (SET_DEST (x))
	       && REG_P (SUBREG_REG (SET_DEST (x)))
	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
	       && refers_to_regno_p (regno, endregno,
				     SUBREG_REG (SET_DEST (x)), loc))
	      || (!REG_P (SET_DEST (x))
		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
	return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
	return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
	{
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }
	  else
	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
	      return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (loc != &XVECEXP (x, i, j)
		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
	      return true;
	}
    }
  return false;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot affect IN.
     We check IN here; the case of a constant X is handled by the default
     case of the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
	regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
			  ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
	const char *fmt;
	int i;

	if (MEM_P (in))
	  return 1;

	fmt = GET_RTX_FORMAT (GET_CODE (in));
	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    {
	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
		return 1;
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
		  return 1;
	    }

	return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
	int i;

	/* If any register in here refers to it we return true.  */
	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
	    return 1;
	return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
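
/* For instance (assuming hypothetical hard registers where DImode
   occupies two word-sized regs), reg_overlap_mentioned_p on
   X = (reg:DI 0) and IN = (plus (reg:SI 1) (const_int 4)) returns 1:
   register number 1 falls inside the range [0, 2) occupied by X.  */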
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET
      || GET_CODE (x) == CLOBBER
      || GET_CODE (x) == CLOBBER_HIGH)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
	      && (!REG_P (SUBREG_REG (dest))
		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
	 each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
	{
	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
	}
      else
	(*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
\f
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	(*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	(*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
	rtx dest = SET_DEST (body);

	/* For a SET we call FUN on everything in the source, plus the
	   registers in any memory expression in the store destination,
	   and the operands of a ZERO_EXTRACT.  */
2004 (*fun) (&SET_SRC (body), data);
2005
2006 if (GET_CODE (dest) == ZERO_EXTRACT)
2007 {
2008 (*fun) (&XEXP (dest, 1), data);
2009 (*fun) (&XEXP (dest, 2), data);
2010 }
2011
2012 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2013 dest = XEXP (dest, 0);
2014
2015 if (MEM_P (dest))
2016 (*fun) (&XEXP (dest, 0), data);
2017 }
2018 return;
2019
2020 default:
2021 /* All the other possibilities never store. */
2022 (*fun) (pbody, data);
2023 return;
2024 }
2025 }
2026 \f
2027 /* Return nonzero if X's old contents don't survive after INSN.
2028 This will be true if X is (cc0) or if X is a register and
2029 X dies in INSN or because INSN entirely sets X.
2030
2031 "Entirely set" means set directly and not through a SUBREG, or
2032 ZERO_EXTRACT, so no trace of the old contents remains.
2033 Likewise, REG_INC does not count.
2034
2035 REG may be a hard or pseudo reg. Renumbering is not taken into account,
2036 but for this use that makes no difference, since regs don't overlap
2037 during their lifetimes. Therefore, this function may be used
2038 at any time after deaths have been computed.
2039
2040 If REG is a hard reg that occupies multiple machine registers, this
2041 function will only return 1 if each of those registers will be replaced
2042 by INSN. */
2043
2044 int
2045 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2046 {
2047 unsigned int regno, end_regno;
2048 unsigned int i;
2049
2050 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
2051 if (GET_CODE (x) == CC0)
2052 return 1;
2053
2054 gcc_assert (REG_P (x));
2055
2056 regno = REGNO (x);
2057 end_regno = END_REGNO (x);
2058 for (i = regno; i < end_regno; i++)
2059 if (! dead_or_set_regno_p (insn, i))
2060 return 0;
2061
2062 return 1;
2063 }
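
/* For example, a pass might check whether the value in hard register 0
   survives INSN.  A minimal sketch, assuming deaths have already been
   computed:

     rtx reg = gen_rtx_REG (word_mode, 0);
     if (dead_or_set_p (insn, reg))
       ...  the old contents of register 0 do not survive INSN ...  */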
2064
2065 /* Return TRUE iff DEST is a register or subreg of a register, is a
2066 complete rather than read-modify-write destination, and contains
2067 register TEST_REGNO. */
2068
2069 static bool
2070 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2071 {
2072 unsigned int regno, endregno;
2073
2074 if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2075 dest = SUBREG_REG (dest);
2076
2077 if (!REG_P (dest))
2078 return false;
2079
2080 regno = REGNO (dest);
2081 endregno = END_REGNO (dest);
2082 return (test_regno >= regno && test_regno < endregno);
2083 }
2084
2085 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2086 any member matches the covers_regno_no_parallel_p criteria. */
2087
2088 static bool
2089 covers_regno_p (const_rtx dest, unsigned int test_regno)
2090 {
2091 if (GET_CODE (dest) == PARALLEL)
2092 {
2093 /* Some targets place small structures in registers for return
2094 values of functions, and those registers are wrapped in
2095 PARALLELs that we may see as the destination of a SET. */
2096 int i;
2097
2098 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2099 {
2100 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2101 if (inner != NULL_RTX
2102 && covers_regno_no_parallel_p (inner, test_regno))
2103 return true;
2104 }
2105
2106 return false;
2107 }
2108 else
2109 return covers_regno_no_parallel_p (dest, test_regno);
2110 }
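
/* The PARALLEL case above typically arises from return values, whose
   general shape is (a sketch, not from any particular target):

     (parallel [(expr_list (reg:SI 0) (const_int 0))
		(expr_list (reg:SI 1) (const_int 4))])

   for which covers_regno_p returns true for both regno 0 and regno 1.  */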
2111
2112 /* Utility function for dead_or_set_p to check an individual register. */
2113
2114 int
2115 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2116 {
2117 const_rtx pattern;
2118
2119 /* See if there is a death note for something that includes TEST_REGNO. */
2120 if (find_regno_note (insn, REG_DEAD, test_regno))
2121 return 1;
2122
2123 if (CALL_P (insn)
2124 && find_regno_fusage (insn, CLOBBER, test_regno))
2125 return 1;
2126
2127 pattern = PATTERN (insn);
2128
2129 /* If a COND_EXEC is not executed, the value survives. */
2130 if (GET_CODE (pattern) == COND_EXEC)
2131 return 0;
2132
2133 if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2134 return covers_regno_p (SET_DEST (pattern), test_regno);
2135 else if (GET_CODE (pattern) == PARALLEL)
2136 {
2137 int i;
2138
2139 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2140 {
2141 rtx body = XVECEXP (pattern, 0, i);
2142
2143 if (GET_CODE (body) == COND_EXEC)
2144 body = COND_EXEC_CODE (body);
2145
2146 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2147 && covers_regno_p (SET_DEST (body), test_regno))
2148 return 1;
2149 }
2150 }
2151
2152 return 0;
2153 }
2154
2155 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2156 If DATUM is nonzero, look for one whose datum is DATUM. */
2157
2158 rtx
2159 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2160 {
2161 rtx link;
2162
2163 gcc_checking_assert (insn);
2164
2165 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2166 if (! INSN_P (insn))
2167 return 0;
2168 if (datum == 0)
2169 {
2170 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2171 if (REG_NOTE_KIND (link) == kind)
2172 return link;
2173 return 0;
2174 }
2175
2176 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2177 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2178 return link;
2179 return 0;
2180 }
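
/* For instance, fetching the REG_EQUAL note of an insn, if any, and
   inspecting its datum.  A minimal sketch:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       {
	 rtx value = XEXP (note, 0);
	 ...  the destination of the insn is known to equal VALUE ...
       }  */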
2181
2182 /* Return the reg-note of kind KIND in insn INSN which applies to register
2183 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2184    the register of this NOTE need not be REGNO itself if REGNO is a hard
2185    register; the note's register might merely overlap REGNO.  */
2186
2187 rtx
2188 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2189 {
2190 rtx link;
2191
2192 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2193 if (! INSN_P (insn))
2194 return 0;
2195
2196 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2197 if (REG_NOTE_KIND (link) == kind
2198 /* Verify that it is a register, so that scratch and MEM won't cause a
2199 problem here. */
2200 && REG_P (XEXP (link, 0))
2201 && REGNO (XEXP (link, 0)) <= regno
2202 && END_REGNO (XEXP (link, 0)) > regno)
2203 return link;
2204 return 0;
2205 }
2206
2207 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2208 has such a note. */
2209
2210 rtx
2211 find_reg_equal_equiv_note (const_rtx insn)
2212 {
2213 rtx link;
2214
2215 if (!INSN_P (insn))
2216 return 0;
2217
2218 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2219 if (REG_NOTE_KIND (link) == REG_EQUAL
2220 || REG_NOTE_KIND (link) == REG_EQUIV)
2221 {
2222 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2223 insns that have multiple sets. Checking single_set to
2224 make sure of this is not the proper check, as explained
2225 in the comment in set_unique_reg_note.
2226
2227 This should be changed into an assert. */
2228 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2229 return 0;
2230 return link;
2231 }
2232 return NULL;
2233 }
2234
2235 /* Check whether INSN is a single_set whose source is known to be
2236 equivalent to a constant. Return that constant if so, otherwise
2237 return null. */
2238
2239 rtx
2240 find_constant_src (const rtx_insn *insn)
2241 {
2242 rtx note, set, x;
2243
2244 set = single_set (insn);
2245 if (set)
2246 {
2247 x = avoid_constant_pool_reference (SET_SRC (set));
2248 if (CONSTANT_P (x))
2249 return x;
2250 }
2251
2252 note = find_reg_equal_equiv_note (insn);
2253 if (note && CONSTANT_P (XEXP (note, 0)))
2254 return XEXP (note, 0);
2255
2256 return NULL_RTX;
2257 }
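
/* A typical use is constant propagation.  A minimal sketch:

     rtx cst = find_constant_src (insn);
     if (cst != NULL_RTX && CONST_INT_P (cst))
       ...  the single set of INSN is known to load the constant CST ...  */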
2258
2259 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2260 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2261
2262 int
2263 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2264 {
2265 /* If it's not a CALL_INSN, it can't possibly have a
2266 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2267 if (!CALL_P (insn))
2268 return 0;
2269
2270 gcc_assert (datum);
2271
2272 if (!REG_P (datum))
2273 {
2274 rtx link;
2275
2276 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2277 link;
2278 link = XEXP (link, 1))
2279 if (GET_CODE (XEXP (link, 0)) == code
2280 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2281 return 1;
2282 }
2283 else
2284 {
2285 unsigned int regno = REGNO (datum);
2286
2287 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2288 to pseudo registers, so don't bother checking. */
2289
2290 if (regno < FIRST_PSEUDO_REGISTER)
2291 {
2292 unsigned int end_regno = END_REGNO (datum);
2293 unsigned int i;
2294
2295 for (i = regno; i < end_regno; i++)
2296 if (find_regno_fusage (insn, code, i))
2297 return 1;
2298 }
2299 }
2300
2301 return 0;
2302 }
2303
2304 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2305 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2306
2307 int
2308 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2309 {
2310 rtx link;
2311
2312 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2313 to pseudo registers, so don't bother checking. */
2314
2315 if (regno >= FIRST_PSEUDO_REGISTER
2316 || !CALL_P (insn) )
2317 return 0;
2318
2319 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2320 {
2321 rtx op, reg;
2322
2323 if (GET_CODE (op = XEXP (link, 0)) == code
2324 && REG_P (reg = XEXP (op, 0))
2325 && REGNO (reg) <= regno
2326 && END_REGNO (reg) > regno)
2327 return 1;
2328 }
2329
2330 return 0;
2331 }
2332
2333 \f
2334 /* Return true if KIND is an integer REG_NOTE. */
2335
2336 static bool
2337 int_reg_note_p (enum reg_note kind)
2338 {
2339 return kind == REG_BR_PROB;
2340 }
2341
2342 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2343 stored as the pointer to the next register note. */
2344
2345 rtx
2346 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2347 {
2348 rtx note;
2349
2350 gcc_checking_assert (!int_reg_note_p (kind));
2351 switch (kind)
2352 {
2353 case REG_CC_SETTER:
2354 case REG_CC_USER:
2355 case REG_LABEL_TARGET:
2356 case REG_LABEL_OPERAND:
2357 case REG_TM:
2358 /* These types of register notes use an INSN_LIST rather than an
2359 EXPR_LIST, so that copying is done right and dumps look
2360 better. */
2361 note = alloc_INSN_LIST (datum, list);
2362 PUT_REG_NOTE_KIND (note, kind);
2363 break;
2364
2365 default:
2366 note = alloc_EXPR_LIST (kind, datum, list);
2367 break;
2368 }
2369
2370 return note;
2371 }
2372
2373 /* Add register note with kind KIND and datum DATUM to INSN. */
2374
2375 void
2376 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2377 {
2378 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2379 }
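
/* So, for example, recording that the result of INSN is known to equal
   a constant is just (a minimal sketch):

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   which allocates an EXPR_LIST via alloc_reg_note and pushes it onto
   the front of REG_NOTES (insn).  */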
2380
2381 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2382
2383 void
2384 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2385 {
2386 gcc_checking_assert (int_reg_note_p (kind));
2387 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2388 datum, REG_NOTES (insn));
2389 }
2390
2391 /* Add a REG_ARGS_SIZE note to INSN with value VALUE. */
2392
2393 void
2394 add_args_size_note (rtx_insn *insn, poly_int64 value)
2395 {
2396 gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2397 add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2398 }
2399
2400 /* Add a register note like NOTE to INSN. */
2401
2402 void
2403 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2404 {
2405 if (GET_CODE (note) == INT_LIST)
2406 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2407 else
2408 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2409 }
2410
2411 /* Duplicate NOTE and return the copy. */
2412 rtx
2413 duplicate_reg_note (rtx note)
2414 {
2415 reg_note kind = REG_NOTE_KIND (note);
2416
2417 if (GET_CODE (note) == INT_LIST)
2418 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2419 else if (GET_CODE (note) == EXPR_LIST)
2420 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2421 else
2422 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2423 }
2424
2425 /* Remove register note NOTE from the REG_NOTES of INSN. */
2426
2427 void
2428 remove_note (rtx_insn *insn, const_rtx note)
2429 {
2430 rtx link;
2431
2432 if (note == NULL_RTX)
2433 return;
2434
2435 if (REG_NOTES (insn) == note)
2436 REG_NOTES (insn) = XEXP (note, 1);
2437 else
2438 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2439 if (XEXP (link, 1) == note)
2440 {
2441 XEXP (link, 1) = XEXP (note, 1);
2442 break;
2443 }
2444
2445 switch (REG_NOTE_KIND (note))
2446 {
2447 case REG_EQUAL:
2448 case REG_EQUIV:
2449 df_notes_rescan (insn);
2450 break;
2451 default:
2452 break;
2453 }
2454 }
2455
2456 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2457 Return true if any note has been removed. */
2458
2459 bool
2460 remove_reg_equal_equiv_notes (rtx_insn *insn)
2461 {
2462 rtx *loc;
2463 bool ret = false;
2464
2465 loc = &REG_NOTES (insn);
2466 while (*loc)
2467 {
2468 enum reg_note kind = REG_NOTE_KIND (*loc);
2469 if (kind == REG_EQUAL || kind == REG_EQUIV)
2470 {
2471 *loc = XEXP (*loc, 1);
2472 ret = true;
2473 }
2474 else
2475 loc = &XEXP (*loc, 1);
2476 }
2477 return ret;
2478 }
2479
2480 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2481
2482 void
2483 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2484 {
2485 df_ref eq_use;
2486
2487 if (!df)
2488 return;
2489
2490   /* This loop is a little tricky.  We cannot just go down the chain because
2491      it is being modified by actions inside the loop.  So we just iterate
2492      over the head.  We plan to drain the list anyway.  */
2493 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2494 {
2495 rtx_insn *insn = DF_REF_INSN (eq_use);
2496 rtx note = find_reg_equal_equiv_note (insn);
2497
2498 /* This assert is generally triggered when someone deletes a REG_EQUAL
2499 or REG_EQUIV note by hacking the list manually rather than calling
2500 remove_note. */
2501 gcc_assert (note);
2502
2503 remove_note (insn, note);
2504 }
2505 }
2506
2507 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2508 return 1 if it is found. A simple equality test is used to determine if
2509 NODE matches. */
2510
2511 bool
2512 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2513 {
2514 const_rtx x;
2515
2516 for (x = listp; x; x = XEXP (x, 1))
2517 if (node == XEXP (x, 0))
2518 return true;
2519
2520 return false;
2521 }
2522
2523 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2524 remove that entry from the list if it is found.
2525
2526 A simple equality test is used to determine if NODE matches. */
2527
2528 void
2529 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2530 {
2531 rtx_expr_list *temp = *listp;
2532 rtx_expr_list *prev = NULL;
2533
2534 while (temp)
2535 {
2536 if (node == temp->element ())
2537 {
2538 /* Splice the node out of the list. */
2539 if (prev)
2540 XEXP (prev, 1) = temp->next ();
2541 else
2542 *listp = temp->next ();
2543
2544 return;
2545 }
2546
2547 prev = temp;
2548 temp = temp->next ();
2549 }
2550 }
2551
2552 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2553 remove that entry from the list if it is found.
2554
2555 A simple equality test is used to determine if NODE matches. */
2556
2557 void
2558 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2559 {
2560 rtx_insn_list *temp = *listp;
2561 rtx_insn_list *prev = NULL;
2562
2563 while (temp)
2564 {
2565 if (node == temp->insn ())
2566 {
2567 /* Splice the node out of the list. */
2568 if (prev)
2569 XEXP (prev, 1) = temp->next ();
2570 else
2571 *listp = temp->next ();
2572
2573 return;
2574 }
2575
2576 prev = temp;
2577 temp = temp->next ();
2578 }
2579 }
2580 \f
2581 /* Nonzero if X contains any volatile instructions. These are instructions
2582    which may cause unpredictable machine state, and thus no
2583 instructions or register uses should be moved or combined across them.
2584 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2585
2586 int
2587 volatile_insn_p (const_rtx x)
2588 {
2589 const RTX_CODE code = GET_CODE (x);
2590 switch (code)
2591 {
2592 case LABEL_REF:
2593 case SYMBOL_REF:
2594 case CONST:
2595 CASE_CONST_ANY:
2596 case CC0:
2597 case PC:
2598 case REG:
2599 case SCRATCH:
2600 case CLOBBER:
2601 case ADDR_VEC:
2602 case ADDR_DIFF_VEC:
2603 case CALL:
2604 case MEM:
2605 return 0;
2606
2607 case UNSPEC_VOLATILE:
2608 return 1;
2609
2610 case ASM_INPUT:
2611 case ASM_OPERANDS:
2612 if (MEM_VOLATILE_P (x))
2613 return 1;
2614
2615 default:
2616 break;
2617 }
2618
2619 /* Recursively scan the operands of this expression. */
2620
2621 {
2622 const char *const fmt = GET_RTX_FORMAT (code);
2623 int i;
2624
2625 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2626 {
2627 if (fmt[i] == 'e')
2628 {
2629 if (volatile_insn_p (XEXP (x, i)))
2630 return 1;
2631 }
2632 else if (fmt[i] == 'E')
2633 {
2634 int j;
2635 for (j = 0; j < XVECLEN (x, i); j++)
2636 if (volatile_insn_p (XVECEXP (x, i, j)))
2637 return 1;
2638 }
2639 }
2640 }
2641 return 0;
2642 }
2643
2644 /* Nonzero if X contains any volatile memory references,
2645    UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
2646
2647 int
2648 volatile_refs_p (const_rtx x)
2649 {
2650 const RTX_CODE code = GET_CODE (x);
2651 switch (code)
2652 {
2653 case LABEL_REF:
2654 case SYMBOL_REF:
2655 case CONST:
2656 CASE_CONST_ANY:
2657 case CC0:
2658 case PC:
2659 case REG:
2660 case SCRATCH:
2661 case CLOBBER:
2662 case ADDR_VEC:
2663 case ADDR_DIFF_VEC:
2664 return 0;
2665
2666 case UNSPEC_VOLATILE:
2667 return 1;
2668
2669 case MEM:
2670 case ASM_INPUT:
2671 case ASM_OPERANDS:
2672 if (MEM_VOLATILE_P (x))
2673 return 1;
2674
2675 default:
2676 break;
2677 }
2678
2679 /* Recursively scan the operands of this expression. */
2680
2681 {
2682 const char *const fmt = GET_RTX_FORMAT (code);
2683 int i;
2684
2685 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2686 {
2687 if (fmt[i] == 'e')
2688 {
2689 if (volatile_refs_p (XEXP (x, i)))
2690 return 1;
2691 }
2692 else if (fmt[i] == 'E')
2693 {
2694 int j;
2695 for (j = 0; j < XVECLEN (x, i); j++)
2696 if (volatile_refs_p (XVECEXP (x, i, j)))
2697 return 1;
2698 }
2699 }
2700 }
2701 return 0;
2702 }
2703
2704 /* Similar to above, except that it also rejects register pre- and post-
2705 incrementing. */
2706
2707 int
2708 side_effects_p (const_rtx x)
2709 {
2710 const RTX_CODE code = GET_CODE (x);
2711 switch (code)
2712 {
2713 case LABEL_REF:
2714 case SYMBOL_REF:
2715 case CONST:
2716 CASE_CONST_ANY:
2717 case CC0:
2718 case PC:
2719 case REG:
2720 case SCRATCH:
2721 case ADDR_VEC:
2722 case ADDR_DIFF_VEC:
2723 case VAR_LOCATION:
2724 return 0;
2725
2726 case CLOBBER:
2727 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2728 when some combination can't be done. If we see one, don't think
2729 that we can simplify the expression. */
2730 return (GET_MODE (x) != VOIDmode);
2731
2732 case PRE_INC:
2733 case PRE_DEC:
2734 case POST_INC:
2735 case POST_DEC:
2736 case PRE_MODIFY:
2737 case POST_MODIFY:
2738 case CALL:
2739 case UNSPEC_VOLATILE:
2740 return 1;
2741
2742 case MEM:
2743 case ASM_INPUT:
2744 case ASM_OPERANDS:
2745 if (MEM_VOLATILE_P (x))
2746 return 1;
2747
2748 default:
2749 break;
2750 }
2751
2752 /* Recursively scan the operands of this expression. */
2753
2754 {
2755 const char *fmt = GET_RTX_FORMAT (code);
2756 int i;
2757
2758 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2759 {
2760 if (fmt[i] == 'e')
2761 {
2762 if (side_effects_p (XEXP (x, i)))
2763 return 1;
2764 }
2765 else if (fmt[i] == 'E')
2766 {
2767 int j;
2768 for (j = 0; j < XVECLEN (x, i); j++)
2769 if (side_effects_p (XVECEXP (x, i, j)))
2770 return 1;
2771 }
2772 }
2773 }
2774 return 0;
2775 }
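
/* As an illustration (a sketch; X and Y here stand for arbitrary valid
   operands):

     side_effects_p (gen_rtx_PLUS (SImode, x, y))    -- 0, a pure value
     side_effects_p (gen_rtx_POST_INC (Pmode, x))    -- 1, autoincrement
     side_effects_p (gen_rtx_MEM (SImode, x))        -- 1 only if the MEM
							is volatile  */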
2776 \f
2777 /* Return nonzero if evaluating rtx X might cause a trap.
2778    FLAGS controls how to consider MEMs.  A nonzero value means the context
2779 of the access may have changed from the original, such that the
2780 address may have become invalid. */
2781
2782 int
2783 may_trap_p_1 (const_rtx x, unsigned flags)
2784 {
2785 int i;
2786 enum rtx_code code;
2787 const char *fmt;
2788
2789 /* We make no distinction currently, but this function is part of
2790 the internal target-hooks ABI so we keep the parameter as
2791 "unsigned flags". */
2792 bool code_changed = flags != 0;
2793
2794 if (x == 0)
2795 return 0;
2796 code = GET_CODE (x);
2797 switch (code)
2798 {
2799 /* Handle these cases quickly. */
2800 CASE_CONST_ANY:
2801 case SYMBOL_REF:
2802 case LABEL_REF:
2803 case CONST:
2804 case PC:
2805 case CC0:
2806 case REG:
2807 case SCRATCH:
2808 return 0;
2809
2810 case UNSPEC:
2811 return targetm.unspec_may_trap_p (x, flags);
2812
2813 case UNSPEC_VOLATILE:
2814 case ASM_INPUT:
2815 case TRAP_IF:
2816 return 1;
2817
2818 case ASM_OPERANDS:
2819 return MEM_VOLATILE_P (x);
2820
2821 /* Memory ref can trap unless it's a static var or a stack slot. */
2822 case MEM:
2823 /* Recognize specific pattern of stack checking probes. */
2824 if (flag_stack_check
2825 && MEM_VOLATILE_P (x)
2826 && XEXP (x, 0) == stack_pointer_rtx)
2827 return 1;
2828 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2829 	     reference; moving it out of context, as when moving code during
2830 	     optimization, might cause its address to become invalid.  */
2831 code_changed
2832 || !MEM_NOTRAP_P (x))
2833 {
2834 poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2835 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2836 GET_MODE (x), code_changed);
2837 }
2838
2839 return 0;
2840
2841     /* Division by a non-constant, or by constant zero, might trap.  */
2842 case DIV:
2843 case MOD:
2844 case UDIV:
2845 case UMOD:
2846 if (HONOR_SNANS (x))
2847 return 1;
2848 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2849 return flag_trapping_math;
2850 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2851 return 1;
2852 break;
2853
2854 case EXPR_LIST:
2855 /* An EXPR_LIST is used to represent a function call. This
2856 certainly may trap. */
2857 return 1;
2858
2859 case GE:
2860 case GT:
2861 case LE:
2862 case LT:
2863 case LTGT:
2864 case COMPARE:
2865 /* Some floating point comparisons may trap. */
2866 if (!flag_trapping_math)
2867 break;
2868 /* ??? There is no machine independent way to check for tests that trap
2869 when COMPARE is used, though many targets do make this distinction.
2870 For instance, sparc uses CCFPE for compares which generate exceptions
2871 and CCFP for compares which do not generate exceptions. */
2872 if (HONOR_NANS (x))
2873 return 1;
2874 /* But often the compare has some CC mode, so check operand
2875 modes as well. */
2876 if (HONOR_NANS (XEXP (x, 0))
2877 || HONOR_NANS (XEXP (x, 1)))
2878 return 1;
2879 break;
2880
2881 case EQ:
2882 case NE:
2883 if (HONOR_SNANS (x))
2884 return 1;
2885 /* Often comparison is CC mode, so check operand modes. */
2886 if (HONOR_SNANS (XEXP (x, 0))
2887 || HONOR_SNANS (XEXP (x, 1)))
2888 return 1;
2889 break;
2890
2891 case FIX:
2892 /* Conversion of floating point might trap. */
2893 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2894 return 1;
2895 break;
2896
2897 case NEG:
2898 case ABS:
2899 case SUBREG:
2900 /* These operations don't trap even with floating point. */
2901 break;
2902
2903 default:
2904 /* Any floating arithmetic may trap. */
2905 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2906 return 1;
2907 }
2908
2909 fmt = GET_RTX_FORMAT (code);
2910 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2911 {
2912 if (fmt[i] == 'e')
2913 {
2914 if (may_trap_p_1 (XEXP (x, i), flags))
2915 return 1;
2916 }
2917 else if (fmt[i] == 'E')
2918 {
2919 int j;
2920 for (j = 0; j < XVECLEN (x, i); j++)
2921 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2922 return 1;
2923 }
2924 }
2925 return 0;
2926 }
2927
2928 /* Return nonzero if evaluating rtx X might cause a trap. */
2929
2930 int
2931 may_trap_p (const_rtx x)
2932 {
2933 return may_trap_p_1 (x, 0);
2934 }
2935
2936 /* Same as above, but additionally return nonzero if evaluating rtx X might
2937    cause a fault.  We define a fault for the purpose of this function as an
2938 erroneous execution condition that cannot be encountered during the normal
2939 execution of a valid program; the typical example is an unaligned memory
2940 access on a strict alignment machine. The compiler guarantees that it
2941 doesn't generate code that will fault from a valid program, but this
2942 guarantee doesn't mean anything for individual instructions. Consider
2943 the following example:
2944
2945 struct S { int d; union { char *cp; int *ip; }; };
2946
2947 int foo(struct S *s)
2948 {
2949 if (s->d == 1)
2950 return *s->ip;
2951 else
2952 return *s->cp;
2953 }
2954
2955 on a strict alignment machine. In a valid program, foo will never be
2956 invoked on a structure for which d is equal to 1 and the underlying
2957 unique field of the union not aligned on a 4-byte boundary, but the
2958 expression *s->ip might cause a fault if considered individually.
2959
2960 At the RTL level, potentially problematic expressions will almost always
2961    satisfy may_trap_p; for example, the above dereference can be emitted as
2962 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2963 However, suppose that foo is inlined in a caller that causes s->cp to
2964 point to a local character variable and guarantees that s->d is not set
2965 to 1; foo may have been effectively translated into pseudo-RTL as:
2966
2967 if ((reg:SI) == 1)
2968 (set (reg:SI) (mem:SI (%fp - 7)))
2969 else
2970 (set (reg:QI) (mem:QI (%fp - 7)))
2971
2972 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2973 memory reference to a stack slot, but it will certainly cause a fault
2974 on a strict alignment machine. */
2975
2976 int
2977 may_trap_or_fault_p (const_rtx x)
2978 {
2979 return may_trap_p_1 (x, 1);
2980 }
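
/* Thus a pass that wants to evaluate an expression speculatively,
   outside its guarding test, should use the stronger predicate.  A
   minimal sketch:

     if (!may_trap_or_fault_p (x))
       ...  X can safely be evaluated unconditionally ...  */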
2981 \f
2982 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2983 i.e., an inequality. */
2984
2985 int
2986 inequality_comparisons_p (const_rtx x)
2987 {
2988 const char *fmt;
2989 int len, i;
2990 const enum rtx_code code = GET_CODE (x);
2991
2992 switch (code)
2993 {
2994 case REG:
2995 case SCRATCH:
2996 case PC:
2997 case CC0:
2998 CASE_CONST_ANY:
2999 case CONST:
3000 case LABEL_REF:
3001 case SYMBOL_REF:
3002 return 0;
3003
3004 case LT:
3005 case LTU:
3006 case GT:
3007 case GTU:
3008 case LE:
3009 case LEU:
3010 case GE:
3011 case GEU:
3012 return 1;
3013
3014 default:
3015 break;
3016 }
3017
3018 len = GET_RTX_LENGTH (code);
3019 fmt = GET_RTX_FORMAT (code);
3020
3021 for (i = 0; i < len; i++)
3022 {
3023 if (fmt[i] == 'e')
3024 {
3025 if (inequality_comparisons_p (XEXP (x, i)))
3026 return 1;
3027 }
3028 else if (fmt[i] == 'E')
3029 {
3030 int j;
3031 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3032 if (inequality_comparisons_p (XVECEXP (x, i, j)))
3033 return 1;
3034 }
3035 }
3036
3037 return 0;
3038 }
3039 \f
3040 /* Replace any occurrence of FROM in X with TO. The function does
3041    not descend into CONST_DOUBLE expressions when replacing.
3042
3043 Note that copying is not done so X must not be shared unless all copies
3044 are to be modified.
3045
3046    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3047    those that are pointer-equal to it.  */
3048
3049 rtx
3050 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3051 {
3052 int i, j;
3053 const char *fmt;
3054
3055 if (x == from)
3056 return to;
3057
3058 /* Allow this function to make replacements in EXPR_LISTs. */
3059 if (x == 0)
3060 return 0;
3061
3062 if (all_regs
3063 && REG_P (x)
3064 && REG_P (from)
3065 && REGNO (x) == REGNO (from))
3066 {
3067 gcc_assert (GET_MODE (x) == GET_MODE (from));
3068 return to;
3069 }
3070 else if (GET_CODE (x) == SUBREG)
3071 {
3072 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3073
3074 if (CONST_INT_P (new_rtx))
3075 {
3076 x = simplify_subreg (GET_MODE (x), new_rtx,
3077 GET_MODE (SUBREG_REG (x)),
3078 SUBREG_BYTE (x));
3079 gcc_assert (x);
3080 }
3081 else
3082 SUBREG_REG (x) = new_rtx;
3083
3084 return x;
3085 }
3086 else if (GET_CODE (x) == ZERO_EXTEND)
3087 {
3088 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3089
3090 if (CONST_INT_P (new_rtx))
3091 {
3092 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3093 new_rtx, GET_MODE (XEXP (x, 0)));
3094 gcc_assert (x);
3095 }
3096 else
3097 XEXP (x, 0) = new_rtx;
3098
3099 return x;
3100 }
3101
3102 fmt = GET_RTX_FORMAT (GET_CODE (x));
3103 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3104 {
3105 if (fmt[i] == 'e')
3106 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3107 else if (fmt[i] == 'E')
3108 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3109 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3110 from, to, all_regs);
3111 }
3112
3113 return x;
3114 }
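
/* For instance, rewriting every pointer-equal occurrence of register
   OLD_REG in X with NEW_REG (a minimal sketch; remember that X must not
   be shared):

     x = replace_rtx (x, old_reg, new_reg, false);  */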
3115 \f
3116 /* Replace occurrences of OLD_LABEL in *LOC with NEW_LABEL.  Also track
3117 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3118
3119 void
3120 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3121 {
3122 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3123 rtx x = *loc;
3124 if (JUMP_TABLE_DATA_P (x))
3125 {
3126 x = PATTERN (x);
3127 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3128 int len = GET_NUM_ELEM (vec);
3129 for (int i = 0; i < len; ++i)
3130 {
3131 rtx ref = RTVEC_ELT (vec, i);
3132 if (XEXP (ref, 0) == old_label)
3133 {
3134 XEXP (ref, 0) = new_label;
3135 if (update_label_nuses)
3136 {
3137 ++LABEL_NUSES (new_label);
3138 --LABEL_NUSES (old_label);
3139 }
3140 }
3141 }
3142 return;
3143 }
3144
3145 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3146 field. This is not handled by the iterator because it doesn't
3147 handle unprinted ('0') fields. */
3148 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3149 JUMP_LABEL (x) = new_label;
3150
3151 subrtx_ptr_iterator::array_type array;
3152 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3153 {
3154 rtx *loc = *iter;
3155 if (rtx x = *loc)
3156 {
3157 if (GET_CODE (x) == SYMBOL_REF
3158 && CONSTANT_POOL_ADDRESS_P (x))
3159 {
3160 rtx c = get_pool_constant (x);
3161 if (rtx_referenced_p (old_label, c))
3162 {
3163 /* Create a copy of constant C; replace the label inside
3164 but do not update LABEL_NUSES because uses in constant pool
3165 are not counted. */
3166 rtx new_c = copy_rtx (c);
3167 replace_label (&new_c, old_label, new_label, false);
3168
3169 /* Add the new constant NEW_C to constant pool and replace
3170 the old reference to constant by new reference. */
3171 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3172 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3173 }
3174 }
3175
3176 if ((GET_CODE (x) == LABEL_REF
3177 || GET_CODE (x) == INSN_LIST)
3178 && XEXP (x, 0) == old_label)
3179 {
3180 XEXP (x, 0) = new_label;
3181 if (update_label_nuses)
3182 {
3183 ++LABEL_NUSES (new_label);
3184 --LABEL_NUSES (old_label);
3185 }
3186 }
3187 }
3188 }
3189 }
3190
3191 void
3192 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3193 rtx_insn *new_label, bool update_label_nuses)
3194 {
3195 rtx insn_as_rtx = insn;
3196 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3197 gcc_checking_assert (insn_as_rtx == insn);
3198 }
3199
3200 /* Return true if X is referenced in BODY. */
3201
3202 bool
3203 rtx_referenced_p (const_rtx x, const_rtx body)
3204 {
3205 subrtx_iterator::array_type array;
3206 FOR_EACH_SUBRTX (iter, array, body, ALL)
3207 if (const_rtx y = *iter)
3208 {
3209 /* Check if a label_ref Y refers to label X. */
3210 if (GET_CODE (y) == LABEL_REF
3211 && LABEL_P (x)
3212 && label_ref_label (y) == x)
3213 return true;
3214
3215 if (rtx_equal_p (x, y))
3216 return true;
3217
3218 /* If Y is a reference to pool constant traverse the constant. */
3219 if (GET_CODE (y) == SYMBOL_REF
3220 && CONSTANT_POOL_ADDRESS_P (y))
3221 iter.substitute (get_pool_constant (y));
3222 }
3223 return false;
3224 }
3225
3226 /* If INSN is a tablejump, return true and store the label (which precedes
3227    the jump table) in *LABELP and the jump table in *TABLEP.  Either may be NULL.  */
3228
3229 bool
3230 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3231 rtx_jump_table_data **tablep)
3232 {
3233 if (!JUMP_P (insn))
3234 return false;
3235
3236 rtx target = JUMP_LABEL (insn);
3237 if (target == NULL_RTX || ANY_RETURN_P (target))
3238 return false;
3239
3240 rtx_insn *label = as_a<rtx_insn *> (target);
3241 rtx_insn *table = next_insn (label);
3242 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3243 return false;
3244
3245 if (labelp)
3246 *labelp = label;
3247 if (tablep)
3248 *tablep = as_a <rtx_jump_table_data *> (table);
3249 return true;
3250 }
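
/* A typical call looks like this (a minimal sketch):

     rtx_insn *label;
     rtx_jump_table_data *table;
     if (tablejump_p (insn, &label, &table))
       ...  TABLE holds the ADDR_VEC or ADDR_DIFF_VEC of the dispatch ...  */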
3251
3252 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG, MEM, or
3253 constant that is not in the constant pool and not in the condition
3254 of an IF_THEN_ELSE. */
3255
3256 static int
3257 computed_jump_p_1 (const_rtx x)
3258 {
3259 const enum rtx_code code = GET_CODE (x);
3260 int i, j;
3261 const char *fmt;
3262
3263 switch (code)
3264 {
3265 case LABEL_REF:
3266 case PC:
3267 return 0;
3268
3269 case CONST:
3270 CASE_CONST_ANY:
3271 case SYMBOL_REF:
3272 case REG:
3273 return 1;
3274
3275 case MEM:
3276 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3277 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3278
3279 case IF_THEN_ELSE:
3280 return (computed_jump_p_1 (XEXP (x, 1))
3281 || computed_jump_p_1 (XEXP (x, 2)));
3282
3283 default:
3284 break;
3285 }
3286
3287 fmt = GET_RTX_FORMAT (code);
3288 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3289 {
3290 if (fmt[i] == 'e'
3291 && computed_jump_p_1 (XEXP (x, i)))
3292 return 1;
3293
3294 else if (fmt[i] == 'E')
3295 for (j = 0; j < XVECLEN (x, i); j++)
3296 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3297 return 1;
3298 }
3299
3300 return 0;
3301 }
3302
3303 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3304
3305 Tablejumps and casesi insns are not considered indirect jumps;
3306 we can recognize them by a (use (label_ref)). */
3307
3308 int
3309 computed_jump_p (const rtx_insn *insn)
3310 {
3311 int i;
3312 if (JUMP_P (insn))
3313 {
3314 rtx pat = PATTERN (insn);
3315
3316 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3317 if (JUMP_LABEL (insn) != NULL)
3318 return 0;
3319
3320 if (GET_CODE (pat) == PARALLEL)
3321 {
3322 int len = XVECLEN (pat, 0);
3323 int has_use_labelref = 0;
3324
3325 for (i = len - 1; i >= 0; i--)
3326 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3327 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3328 == LABEL_REF))
3329 {
3330 has_use_labelref = 1;
3331 break;
3332 }
3333
3334 if (! has_use_labelref)
3335 for (i = len - 1; i >= 0; i--)
3336 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3337 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3338 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3339 return 1;
3340 }
3341 else if (GET_CODE (pat) == SET
3342 && SET_DEST (pat) == pc_rtx
3343 && computed_jump_p_1 (SET_SRC (pat)))
3344 return 1;
3345 }
3346 return 0;
3347 }
3348
3349 \f
3350
3351 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3352 the equivalent add insn and pass the result to FN, using DATA as the
3353 final argument. */
3354
3355 static int
3356 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3357 {
3358 rtx x = XEXP (mem, 0);
3359 switch (GET_CODE (x))
3360 {
3361 case PRE_INC:
3362 case POST_INC:
3363 {
3364 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3365 rtx r1 = XEXP (x, 0);
3366 rtx c = gen_int_mode (size, GET_MODE (r1));
3367 return fn (mem, x, r1, r1, c, data);
3368 }
3369
3370 case PRE_DEC:
3371 case POST_DEC:
3372 {
3373 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3374 rtx r1 = XEXP (x, 0);
3375 rtx c = gen_int_mode (-size, GET_MODE (r1));
3376 return fn (mem, x, r1, r1, c, data);
3377 }
3378
3379 case PRE_MODIFY:
3380 case POST_MODIFY:
3381 {
3382 rtx r1 = XEXP (x, 0);
3383 rtx add = XEXP (x, 1);
3384 return fn (mem, x, r1, add, NULL, data);
3385 }
3386
3387 default:
3388 gcc_unreachable ();
3389 }
3390 }
3391
3392 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3393 For each such autoinc operation found, call FN, passing it
3394 the innermost enclosing MEM, the operation itself, the RTX modified
3395 by the operation, two RTXs (the second may be NULL) that, once
3396 added, represent the value to be held by the modified RTX
3397 afterwards, and DATA. FN is to return 0 to continue the
3398 traversal or any other value to have it returned to the caller of
3399 for_each_inc_dec. */
3400
3401 int
3402 for_each_inc_dec (rtx x,
3403 for_each_inc_dec_fn fn,
3404 void *data)
3405 {
3406 subrtx_var_iterator::array_type array;
3407 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3408 {
3409 rtx mem = *iter;
3410 if (mem
3411 && MEM_P (mem)
3412 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3413 {
3414 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3415 if (res != 0)
3416 return res;
3417 iter.skip_subrtxes ();
3418 }
3419 }
3420 return 0;
3421 }
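
/* A callback for the traversal above has the for_each_inc_dec_fn shape.
   A minimal sketch that merely counts autoinc operations (the names are
   illustrative; returning zero continues the traversal):

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       (*(int *) data)++;
       return 0;
     }

     int count = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &count);  */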
3422
3423 \f
3424 /* Searches X for any reference to REGNO, returning the rtx of the
3425 reference found if any. Otherwise, returns NULL_RTX. */
3426
3427 rtx
3428 regno_use_in (unsigned int regno, rtx x)
3429 {
3430 const char *fmt;
3431 int i, j;
3432 rtx tem;
3433
3434 if (REG_P (x) && REGNO (x) == regno)
3435 return x;
3436
3437 fmt = GET_RTX_FORMAT (GET_CODE (x));
3438 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3439 {
3440 if (fmt[i] == 'e')
3441 {
3442 if ((tem = regno_use_in (regno, XEXP (x, i))))
3443 return tem;
3444 }
3445 else if (fmt[i] == 'E')
3446 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3447 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3448 return tem;
3449 }
3450
3451 return NULL_RTX;
3452 }
3453
3454 /* Return a value indicating whether OP, an operand of a commutative
3455 operation, is preferred as the first or second operand. The more
3456 positive the value, the stronger the preference for being the first
3457 operand. */
3458
3459 int
3460 commutative_operand_precedence (rtx op)
3461 {
3462 enum rtx_code code = GET_CODE (op);
3463
3464 /* Constants always become the second operand. Prefer "nice" constants. */
3465 if (code == CONST_INT)
3466 return -10;
3467 if (code == CONST_WIDE_INT)
3468 return -9;
3469 if (code == CONST_POLY_INT)
3470 return -8;
3471 if (code == CONST_DOUBLE)
3472 return -8;
3473 if (code == CONST_FIXED)
3474 return -8;
3475 op = avoid_constant_pool_reference (op);
3476 code = GET_CODE (op);
3477
3478 switch (GET_RTX_CLASS (code))
3479 {
3480 case RTX_CONST_OBJ:
3481 if (code == CONST_INT)
3482 return -7;
3483 if (code == CONST_WIDE_INT)
3484 return -6;
3485 if (code == CONST_POLY_INT)
3486 return -5;
3487 if (code == CONST_DOUBLE)
3488 return -5;
3489 if (code == CONST_FIXED)
3490 return -5;
3491 return -4;
3492
3493 case RTX_EXTRA:
3494 /* SUBREGs of objects should come second. */
3495 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3496 return -3;
3497 return 0;
3498
3499 case RTX_OBJ:
3500       /* Complex expressions should come first, so decrease the priority
3501 	 of objects.  Prefer pointer objects over non-pointer objects.  */
3502 if ((REG_P (op) && REG_POINTER (op))
3503 || (MEM_P (op) && MEM_POINTER (op)))
3504 return -1;
3505 return -2;
3506
3507 case RTX_COMM_ARITH:
3508 /* Prefer operands that are themselves commutative to be first.
3509 This helps to make things linear. In particular,
3510 (and (and (reg) (reg)) (not (reg))) is canonical. */
3511 return 4;
3512
3513 case RTX_BIN_ARITH:
3514 /* If only one operand is a binary expression, it will be the first
3515 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3516 is canonical, although it will usually be further simplified. */
3517 return 2;
3518
3519 case RTX_UNARY:
3520 /* Then prefer NEG and NOT. */
3521 if (code == NEG || code == NOT)
3522 return 1;
3523 /* FALLTHRU */
3524
3525 default:
3526 return 0;
3527 }
3528 }
3529
3530 /* Return 1 iff it is necessary to swap the operands of a commutative
3531    operation in order to canonicalize the expression.  */
3532
3533 bool
3534 swap_commutative_operands_p (rtx x, rtx y)
3535 {
3536 return (commutative_operand_precedence (x)
3537 < commutative_operand_precedence (y));
3538 }
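
/* Canonicalization code therefore typically does (a minimal sketch):

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   so that the operand with the higher precedence ends up first.  */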
3539
3540 /* Return 1 if X is an autoincrement side effect and the register is
3541 not the stack pointer. */
3542 int
3543 auto_inc_p (const_rtx x)
3544 {
3545 switch (GET_CODE (x))
3546 {
3547 case PRE_INC:
3548 case POST_INC:
3549 case PRE_DEC:
3550 case POST_DEC:
3551 case PRE_MODIFY:
3552 case POST_MODIFY:
3553 /* There are no REG_INC notes for SP. */
3554 if (XEXP (x, 0) != stack_pointer_rtx)
3555 return 1;
3556 default:
3557 break;
3558 }
3559 return 0;
3560 }
3561
3562 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3563 int
3564 loc_mentioned_in_p (rtx *loc, const_rtx in)
3565 {
3566 enum rtx_code code;
3567 const char *fmt;
3568 int i, j;
3569
3570 if (!in)
3571 return 0;
3572
3573 code = GET_CODE (in);
3574 fmt = GET_RTX_FORMAT (code);
3575 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3576 {
3577 if (fmt[i] == 'e')
3578 {
3579 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3580 return 1;
3581 }
3582 else if (fmt[i] == 'E')
3583 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3584 if (loc == &XVECEXP (in, i, j)
3585 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3586 return 1;
3587 }
3588 return 0;
3589 }
3590
3591 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3592 and SUBREG_BYTE, return the bit offset where the subreg begins
3593 (counting from the least significant bit of the operand). */
3594
3595 poly_uint64
3596 subreg_lsb_1 (machine_mode outer_mode,
3597 machine_mode inner_mode,
3598 poly_uint64 subreg_byte)
3599 {
3600 poly_uint64 subreg_end, trailing_bytes, byte_pos;
3601
3602 /* A paradoxical subreg begins at bit position 0. */
3603 if (paradoxical_subreg_p (outer_mode, inner_mode))
3604 return 0;
3605
3606 subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
3607 trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
3608 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3609 byte_pos = trailing_bytes;
3610 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3611 byte_pos = subreg_byte;
3612 else
3613 {
3614 /* When bytes and words have opposite endianness, we must be able
3615 to split offsets into words and bytes at compile time. */
3616 poly_uint64 leading_word_part
3617 = force_align_down (subreg_byte, UNITS_PER_WORD);
3618 poly_uint64 trailing_word_part
3619 = force_align_down (trailing_bytes, UNITS_PER_WORD);
3620 /* If the subreg crosses a word boundary ensure that
3621 it also begins and ends on a word boundary. */
3622 gcc_assert (known_le (subreg_end - leading_word_part,
3623 (unsigned int) UNITS_PER_WORD)
3624 || (known_eq (leading_word_part, subreg_byte)
3625 && known_eq (trailing_word_part, trailing_bytes)));
3626 if (WORDS_BIG_ENDIAN)
3627 byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3628 else
3629 byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3630 }
3631
3632 return byte_pos * BITS_PER_UNIT;
3633 }
3634
3635 /* Given a subreg X, return the bit offset where the subreg begins
3636 (counting from the least significant bit of the reg). */
3637
3638 poly_uint64
3639 subreg_lsb (const_rtx x)
3640 {
3641 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3642 SUBREG_BYTE (x));
3643 }
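
/* As a worked example (assuming a target where neither WORDS_BIG_ENDIAN
   nor BYTES_BIG_ENDIAN holds, and an 8-byte DImode):

     (subreg:SI (reg:DI R) 4)

   has subreg_lsb_1 (SImode, DImode, 4) == 4 * BITS_PER_UNIT == 32,
   i.e. the subreg reads the high 32 bits of the 64-bit value.  */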
3644
3645 /* Return the subreg byte offset for a subreg whose outer value has
3646 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3647 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3648 lsb of the inner value. This is the inverse of the calculation
3649 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3650
3651 poly_uint64
3652 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3653 poly_uint64 lsb_shift)
3654 {
3655 /* A paradoxical subreg begins at bit position 0. */
3656 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3657 if (maybe_gt (outer_bytes, inner_bytes))
3658 {
3659 gcc_checking_assert (known_eq (lsb_shift, 0U));
3660 return 0;
3661 }
3662
3663 poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3664 poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3665 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3666 return upper_bytes;
3667 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3668 return lower_bytes;
3669 else
3670 {
3671 /* When bytes and words have opposite endianness, we must be able
3672 to split offsets into words and bytes at compile time. */
3673 poly_uint64 lower_word_part = force_align_down (lower_bytes,
3674 UNITS_PER_WORD);
3675 poly_uint64 upper_word_part = force_align_down (upper_bytes,
3676 UNITS_PER_WORD);
3677 if (WORDS_BIG_ENDIAN)
3678 return upper_word_part + (lower_bytes - lower_word_part);
3679 else
3680 return lower_word_part + (upper_bytes - upper_word_part);
3681 }
3682 }
3683
3684 /* Fill in information about a subreg of a hard register.
3685 xregno - A regno of an inner hard subreg_reg (or what will become one).
3686 xmode - The mode of xregno.
3687 offset - The byte offset.
3688 ymode - The mode of a top level SUBREG (or what may become one).
3689 info - Pointer to structure to fill in.
3690
3691 Rather than considering one particular inner register (and thus one
3692 particular "outer" register) in isolation, this function really uses
3693 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3694 function does not check whether adding INFO->offset to XREGNO gives
3695 a valid hard register; even if INFO->offset + XREGNO is out of range,
3696 there might be another register of the same type that is in range.
3697 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3698 the new register, since that can depend on things like whether the final
3699 register number is even or odd. Callers that want to check whether
3700 this particular subreg can be replaced by a simple (reg ...) should
3701 use simplify_subreg_regno. */
3702
3703 void
3704 subreg_get_info (unsigned int xregno, machine_mode xmode,
3705 poly_uint64 offset, machine_mode ymode,
3706 struct subreg_info *info)
3707 {
3708 unsigned int nregs_xmode, nregs_ymode;
3709
3710 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3711
3712 poly_uint64 xsize = GET_MODE_SIZE (xmode);
3713 poly_uint64 ysize = GET_MODE_SIZE (ymode);
3714
3715 bool rknown = false;
3716
3717 /* If the register representation of a non-scalar mode has holes in it,
3718 we expect the scalar units to be concatenated together, with the holes
3719 distributed evenly among the scalar units. Each scalar unit must occupy
3720 at least one register. */
3721 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3722 {
3723 /* As a consequence, we must be dealing with a constant number of
3724 scalars, and thus a constant offset and number of units. */
3725 HOST_WIDE_INT coffset = offset.to_constant ();
3726 HOST_WIDE_INT cysize = ysize.to_constant ();
3727 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3728 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3729 scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3730 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3731 gcc_assert (nregs_xmode
3732 == (nunits
3733 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3734 gcc_assert (hard_regno_nregs (xregno, xmode)
3735 == hard_regno_nregs (xregno, xmode_unit) * nunits);
3736
3737 /* You can only ask for a SUBREG of a value with holes in the middle
3738 if you don't cross the holes. (Such a SUBREG should be done by
3739 picking a different register class, or doing it in memory if
3740 necessary.) An example of a value with holes is XCmode on 32-bit
3741 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3742 3 for each part, but in memory it's two 128-bit parts.
3743 Padding is assumed to be at the end (not necessarily the 'high part')
3744 of each unit. */
3745 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3746 && (coffset / GET_MODE_SIZE (xmode_unit)
3747 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3748 {
3749 info->representable_p = false;
3750 rknown = true;
3751 }
3752 }
3753 else
3754 nregs_xmode = hard_regno_nregs (xregno, xmode);
3755
3756 nregs_ymode = hard_regno_nregs (xregno, ymode);
3757
3758 /* Subreg sizes must be ordered, so that we can tell whether they are
3759 partial, paradoxical or complete. */
3760 gcc_checking_assert (ordered_p (xsize, ysize));
3761
3762 /* Paradoxical subregs are otherwise valid. */
3763 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3764 {
3765 info->representable_p = true;
3766 /* If this is a big endian paradoxical subreg, which uses more
3767 actual hard registers than the original register, we must
3768 return a negative offset so that we find the proper highpart
3769 of the register.
3770
3771 We assume that the ordering of registers within a multi-register
3772 value has a consistent endianness: if bytes and register words
3773 have different endianness, the hard registers that make up a
3774 multi-register value must be at least word-sized. */
3775 if (REG_WORDS_BIG_ENDIAN)
3776 info->offset = (int) nregs_xmode - (int) nregs_ymode;
3777 else
3778 info->offset = 0;
3779 info->nregs = nregs_ymode;
3780 return;
3781 }
3782
3783 /* If registers store different numbers of bits in the different
3784 modes, we cannot generally form this subreg. */
3785 poly_uint64 regsize_xmode, regsize_ymode;
3786 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3787 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3788 && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3789 && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3790 {
3791 if (!rknown
3792 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3793 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3794 {
3795 info->representable_p = false;
3796 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3797 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3798 /* Checked by validate_subreg. We must know at compile time
3799 which inner registers are being accessed. */
3800 gcc_unreachable ();
3801 return;
3802 }
3803 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3804 would go outside of XMODE. */
3805 if (!rknown && maybe_gt (ysize + offset, xsize))
3806 {
3807 info->representable_p = false;
3808 info->nregs = nregs_ymode;
3809 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3810 /* Checked by validate_subreg. We must know at compile time
3811 which inner registers are being accessed. */
3812 gcc_unreachable ();
3813 return;
3814 }
3815 /* Quick exit for the simple and common case of extracting whole
3816 subregisters from a multiregister value. */
3817 /* ??? It would be better to integrate this into the code below,
3818 if we can generalize the concept enough and figure out how
3819 odd-sized modes can coexist with the other weird cases we support. */
3820 HOST_WIDE_INT count;
3821 if (!rknown
3822 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3823 && known_eq (regsize_xmode, regsize_ymode)
3824 && constant_multiple_p (offset, regsize_ymode, &count))
3825 {
3826 info->representable_p = true;
3827 info->nregs = nregs_ymode;
3828 info->offset = count;
3829 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3830 return;
3831 }
3832 }
3833
3834 /* Lowpart subregs are otherwise valid. */
3835 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3836 {
3837 info->representable_p = true;
3838 rknown = true;
3839
3840 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3841 {
3842 info->offset = 0;
3843 info->nregs = nregs_ymode;
3844 return;
3845 }
3846 }
3847
3848 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3849 values there are in (reg:XMODE XREGNO). We can view the register
3850 as consisting of this number of independent "blocks", where each
3851 block occupies NREGS_YMODE registers and contains exactly one
3852 representable YMODE value. */
3853 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3854 unsigned int num_blocks = nregs_xmode / nregs_ymode;
3855
3856 /* Calculate the number of bytes in each block. This must always
3857 be exact, otherwise we don't know how to verify the constraint.
3858 These conditions may be relaxed but subreg_regno_offset would
3859 need to be redesigned. */
3860 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3861
3862 /* Get the number of the first block that contains the subreg and the byte
3863 offset of the subreg from the start of that block. */
3864 unsigned int block_number;
3865 poly_uint64 subblock_offset;
3866 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3867 &subblock_offset))
3868 /* Checked by validate_subreg. We must know at compile time which
3869 inner registers are being accessed. */
3870 gcc_unreachable ();
3871
3872 if (!rknown)
3873 {
3874 /* Only the lowpart of each block is representable. */
3875 info->representable_p
3876 = known_eq (subblock_offset,
3877 subreg_size_lowpart_offset (ysize, bytes_per_block));
3878 rknown = true;
3879 }
3880
3881 /* We assume that the ordering of registers within a multi-register
3882 value has a consistent endianness: if bytes and register words
3883 have different endianness, the hard registers that make up a
3884 multi-register value must be at least word-sized. */
3885 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3886 /* The block number we calculated above followed memory endianness.
3887 Convert it to register endianness by counting back from the end.
3888 (Note that, because of the assumption above, each block must be
3889 at least word-sized.) */
3890 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3891 else
3892 info->offset = block_number * nregs_ymode;
3893 info->nregs = nregs_ymode;
3894 }
3895
3896 /* This function returns the regno offset of a subreg expression.
3897 xregno - A regno of an inner hard subreg_reg (or what will become one).
3898 xmode - The mode of xregno.
3899 offset - The byte offset.
3900 ymode - The mode of a top level SUBREG (or what may become one).
3901 RETURN - The regno offset which would be used. */
3902 unsigned int
3903 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3904 poly_uint64 offset, machine_mode ymode)
3905 {
3906 struct subreg_info info;
3907 subreg_get_info (xregno, xmode, offset, ymode, &info);
3908 return info.offset;
3909 }
3910
3911 /* This function returns true when a SUBREG at the given byte offset of
3912    the given regno is representable.
3913 xregno - A regno of an inner hard subreg_reg (or what will become one).
3914 xmode - The mode of xregno.
3915 offset - The byte offset.
3916 ymode - The mode of a top level SUBREG (or what may become one).
3917 RETURN - Whether the offset is representable. */
3918 bool
3919 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3920 poly_uint64 offset, machine_mode ymode)
3921 {
3922 struct subreg_info info;
3923 subreg_get_info (xregno, xmode, offset, ymode, &info);
3924 return info.representable_p;
3925 }
3926
3927 /* Return the number of a YMODE register to which
3928
3929 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3930
3931 can be simplified. Return -1 if the subreg can't be simplified.
3932
3933 XREGNO is a hard register number. */
3934
3935 int
3936 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3937 poly_uint64 offset, machine_mode ymode)
3938 {
3939 struct subreg_info info;
3940 unsigned int yregno;
3941
3942 /* Give the backend a chance to disallow the mode change. */
3943 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3944 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3945 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
3946 /* We can use mode change in LRA for some transformations. */
3947 && ! lra_in_progress)
3948 return -1;
3949
3950 /* We shouldn't simplify stack-related registers. */
3951 if ((!reload_completed || frame_pointer_needed)
3952 && xregno == FRAME_POINTER_REGNUM)
3953 return -1;
3954
3955 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3956 && xregno == ARG_POINTER_REGNUM)
3957 return -1;
3958
3959 if (xregno == STACK_POINTER_REGNUM
3960       /* We should convert the hard stack register in LRA if
3961 	 possible.  */
3962 && ! lra_in_progress)
3963 return -1;
3964
3965 /* Try to get the register offset. */
3966 subreg_get_info (xregno, xmode, offset, ymode, &info);
3967 if (!info.representable_p)
3968 return -1;
3969
3970 /* Make sure that the offsetted register value is in range. */
3971 yregno = xregno + info.offset;
3972 if (!HARD_REGISTER_NUM_P (yregno))
3973 return -1;
3974
3975 /* See whether (reg:YMODE YREGNO) is valid.
3976
3977 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3978 This is a kludge to work around how complex FP arguments are passed
3979 on IA-64 and should be fixed. See PR target/49226. */
3980 if (!targetm.hard_regno_mode_ok (yregno, ymode)
3981 && targetm.hard_regno_mode_ok (xregno, xmode))
3982 return -1;
3983
3984 return (int) yregno;
3985 }
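
/* For instance (a sketch, assuming a little-endian target on which
   SImode occupies one hard register and DImode two):

     simplify_subreg_regno (0, DImode, 4, SImode)

   returns 1 when (reg:SI 1) is a valid hard register for SImode, and
   -1 otherwise.  */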
3986
3987 /* Return the final regno that a subreg expression refers to. */
3988 unsigned int
3989 subreg_regno (const_rtx x)
3990 {
3991 unsigned int ret;
3992 rtx subreg = SUBREG_REG (x);
3993 int regno = REGNO (subreg);
3994
3995 ret = regno + subreg_regno_offset (regno,
3996 GET_MODE (subreg),
3997 SUBREG_BYTE (x),
3998 GET_MODE (x));
3999 return ret;
4000
4001 }
4002
4003 /* Return the number of registers that a subreg expression refers
4004 to. */
4005 unsigned int
4006 subreg_nregs (const_rtx x)
4007 {
4008 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4009 }
4010
4011 /* Return the number of registers that the subreg expression X refers
4012 to, assuming that its inner register has number REGNO. This is a
4013 variant of subreg_nregs in which the regno is passed in explicitly. */
4014
4015 unsigned int
4016 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4017 {
4018 struct subreg_info info;
4019 rtx subreg = SUBREG_REG (x);
4020
4021 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4022 &info);
4023 return info.nregs;
4024 }
4025
4026 struct parms_set_data
4027 {
4028 int nregs;
4029 HARD_REG_SET regs;
4030 };
4031
4032 /* Helper function for noticing stores to parameter registers. */
4033 static void
4034 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4035 {
4036 struct parms_set_data *const d = (struct parms_set_data *) data;
4037 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4038 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4039 {
4040 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4041 d->nregs--;
4042 }
4043 }
4044
4045 /* Look backward for the first parameter to be loaded.
4046 Note that not all parameter loads will necessarily be
4047 found if CSE has eliminated some of them (e.g., when an argument
4048 to the outer function is passed down as a parameter).
4049 Do not search past BOUNDARY. */
4050 rtx_insn *
4051 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4052 {
4053 struct parms_set_data parm;
4054 rtx p;
4055 rtx_insn *before, *first_set;
4056
4057 /* Since different machines initialize their parameter registers
4058 in different orders, assume nothing. Collect the set of all
4059 parameter registers. */
4060 CLEAR_HARD_REG_SET (parm.regs);
4061 parm.nregs = 0;
4062 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4063 if (GET_CODE (XEXP (p, 0)) == USE
4064 && REG_P (XEXP (XEXP (p, 0), 0))
4065 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4066 {
4067 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4068
4069 /* We only care about registers which can hold function
4070 arguments. */
4071 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4072 continue;
4073
4074 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4075 parm.nregs++;
4076 }
4077 before = call_insn;
4078 first_set = call_insn;
4079
4080 /* Search backward for the first set of a register in this set. */
4081 while (parm.nregs && before != boundary)
4082 {
4083 before = PREV_INSN (before);
4084
4085 /* It is possible that some loads got CSEed from one call to
4086 another. Stop in that case. */
4087 if (CALL_P (before))
4088 break;
4089
4090 /* Our caller must either ensure that we will find all sets
4091 (in case the code has not been optimized yet), or guard
4092 against intervening labels by setting BOUNDARY to the
4093 preceding CODE_LABEL. */
4094 if (LABEL_P (before))
4095 {
4096 gcc_assert (before == boundary);
4097 break;
4098 }
4099
4100 if (INSN_P (before))
4101 {
4102 int nregs_old = parm.nregs;
4103 note_stores (PATTERN (before), parms_set, &parm);
4104 /* If we found something that did not set a parameter reg,
4105 we're done. Do not keep going, as that might result
4106 in hoisting an insn before the setting of a pseudo
4107 that is used by the hoisted insn. */
4108 if (nregs_old != parm.nregs)
4109 first_set = before;
4110 else
4111 break;
4112 }
4113 }
4114 return first_set;
4115 }
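
/* Usage sketch (illustrative only; PROFILING_CODE and BB are
   hypothetical).  A typical caller wants an insertion point that
   precedes both CALL_INSN and all of its parameter loads, bounded by
   the start of the containing basic block: */
#if 0
  rtx_insn *before = find_first_parameter_load (call_insn, BB_HEAD (bb));
  emit_insn_before (profiling_code, before);
#endif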
4116
4117 /* Return true if we should avoid inserting code between INSN and preceding
4118 call instruction. */
4119
4120 bool
4121 keep_with_call_p (const rtx_insn *insn)
4122 {
4123 rtx set;
4124
4125 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4126 {
4127 if (REG_P (SET_DEST (set))
4128 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4129 && fixed_regs[REGNO (SET_DEST (set))]
4130 && general_operand (SET_SRC (set), VOIDmode))
4131 return true;
4132 if (REG_P (SET_SRC (set))
4133 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4134 && REG_P (SET_DEST (set))
4135 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4136 return true;
4137 /* There may be a stack pop just after the call and before the store
4138 of the return register. Search for the actual store when deciding
4139 if we can break or not. */
4140 if (SET_DEST (set) == stack_pointer_rtx)
4141 {
4142 /* This CONST_CAST is okay because next_nonnote_insn just
4143 returns its argument and we assign it to a const_rtx
4144 variable. */
4145 const rtx_insn *i2
4146 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4147 if (i2 && keep_with_call_p (i2))
4148 return true;
4149 }
4150 }
4151 return false;
4152 }
4153
4154 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4155 to non-complex jumps. That is, direct unconditional, conditional,
4156 and tablejumps, but not computed jumps or returns. It also does
4157 not apply to the fallthru case of a conditional jump. */
4158
4159 bool
4160 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4161 {
4162 rtx tmp = JUMP_LABEL (jump_insn);
4163 rtx_jump_table_data *table;
4164
4165 if (label == tmp)
4166 return true;
4167
4168 if (tablejump_p (jump_insn, NULL, &table))
4169 {
4170 rtvec vec = table->get_labels ();
4171 int i, veclen = GET_NUM_ELEM (vec);
4172
4173 for (i = 0; i < veclen; ++i)
4174 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4175 return true;
4176 }
4177
4178 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4179 return true;
4180
4181 return false;
4182 }
4183
4184 \f
4185 /* Return an estimate of the cost of computing rtx X.
4186 One use is in cse, to decide which expression to keep in the hash table.
4187 Another is in rtl generation, to pick the cheapest way to multiply.
4188 Other uses like the latter are expected in the future.
4189
4190 X appears as operand OPNO in an expression with code OUTER_CODE.
4191 SPEED specifies whether costs optimized for speed or size should
4192 be returned. */
4193
4194 int
4195 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4196 int opno, bool speed)
4197 {
4198 int i, j;
4199 enum rtx_code code;
4200 const char *fmt;
4201 int total;
4202 int factor;
4203
4204 if (x == 0)
4205 return 0;
4206
4207 if (GET_MODE (x) != VOIDmode)
4208 mode = GET_MODE (x);
4209
4210 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4211 many insns, taking N times as long. */
4212 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4213 if (factor == 0)
4214 factor = 1;
4215
4216 /* Compute the default costs of certain things.
4217 Note that targetm.rtx_costs can override the defaults. */
4218
4219 code = GET_CODE (x);
4220 switch (code)
4221 {
4222 case MULT:
4223 /* Multiplication has time-complexity O(N*N), where N is the
4224 number of units (translated from digits) when using
4225 schoolbook long multiplication. */
4226 total = factor * factor * COSTS_N_INSNS (5);
4227 break;
4228 case DIV:
4229 case UDIV:
4230 case MOD:
4231 case UMOD:
4232 /* Similarly, complexity for schoolbook long division. */
4233 total = factor * factor * COSTS_N_INSNS (7);
4234 break;
4235 case USE:
4236 /* Used in combine.c as a marker. */
4237 total = 0;
4238 break;
4239 case SET:
4240 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4241 the mode for the factor. */
4242 mode = GET_MODE (SET_DEST (x));
4243 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4244 if (factor == 0)
4245 factor = 1;
4246 /* FALLTHRU */
4247 default:
4248 total = factor * COSTS_N_INSNS (1);
4249 }
4250
4251 switch (code)
4252 {
4253 case REG:
4254 return 0;
4255
4256 case SUBREG:
4257 total = 0;
4258 /* If we can't tie these modes, make this expensive. The larger
4259 the mode, the more expensive it is. */
4260 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4261 return COSTS_N_INSNS (2 + factor);
4262 break;
4263
4264 case TRUNCATE:
4265 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4266 {
4267 total = 0;
4268 break;
4269 }
4270 /* FALLTHRU */
4271 default:
4272 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4273 return total;
4274 break;
4275 }
4276
4277 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4278 which is already in total. */
4279
4280 fmt = GET_RTX_FORMAT (code);
4281 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4282 if (fmt[i] == 'e')
4283 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4284 else if (fmt[i] == 'E')
4285 for (j = 0; j < XVECLEN (x, i); j++)
4286 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4287
4288 return total;
4289 }
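
/* Usage sketch (illustrative only; the register number and the choice
   of a shift as the alternative are hypothetical).  A typical client
   compares the estimated costs of two candidate expressions before
   committing to one of them: */
#if 0
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx mul = gen_rtx_MULT (SImode, reg, GEN_INT (8));
  rtx shift = gen_rtx_ASHIFT (SImode, reg, GEN_INT (3));
  bool speed = optimize_insn_for_speed_p ();
  if (rtx_cost (shift, SImode, SET, 1, speed)
      <= rtx_cost (mul, SImode, SET, 1, speed))
    ; /* Prefer the shift form.  */
#endif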
4290
4291 /* Fill in the structure C with information about both speed and size rtx
4292 costs for X, which is operand OPNO in an expression with code OUTER. */
4293
4294 void
4295 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4296 struct full_rtx_costs *c)
4297 {
4298 c->speed = rtx_cost (x, mode, outer, opno, true);
4299 c->size = rtx_cost (x, mode, outer, opno, false);
4300 }
4301
4302 \f
4303 /* Return the cost of address expression X.
4304 X is expected to be a properly formed address reference.
4305
4306 The SPEED parameter specifies whether costs optimized for speed
4307 or for size should be returned. */
4308
4309 int
4310 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4311 {
4312 /* We may be asked for the cost of various unusual addresses, such as
4313 the operands of a push instruction. It is not worthwhile to
4314 complicate the target hook with such cases. */
4315
4316 if (!memory_address_addr_space_p (mode, x, as))
4317 return 1000;
4318
4319 return targetm.address_cost (x, mode, as, speed);
4320 }
4321
4322 /* If the target doesn't override, compute the cost as with arithmetic. */
4323
4324 int
4325 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4326 {
4327 return rtx_cost (x, Pmode, MEM, 0, speed);
4328 }
4329 \f
4330
4331 unsigned HOST_WIDE_INT
4332 nonzero_bits (const_rtx x, machine_mode mode)
4333 {
4334 if (mode == VOIDmode)
4335 mode = GET_MODE (x);
4336 scalar_int_mode int_mode;
4337 if (!is_a <scalar_int_mode> (mode, &int_mode))
4338 return GET_MODE_MASK (mode);
4339 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4340 }
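
/* Example (illustrative only): masking with a constant bounds the
   result, so for X = (and:SI (reg:SI 100) (const_int 0xff)) the low
   eight bits are the only ones that can be set.  This is the kind of
   fact that lets later passes delete a redundant zero-extension of X. */
#if 0
  rtx x = gen_rtx_AND (SImode, gen_rtx_REG (SImode, 100), GEN_INT (0xff));
  gcc_checking_assert ((nonzero_bits (x, SImode)
			& ~HOST_WIDE_INT_UC (0xff)) == 0);
#endif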
4341
4342 unsigned int
4343 num_sign_bit_copies (const_rtx x, machine_mode mode)
4344 {
4345 if (mode == VOIDmode)
4346 mode = GET_MODE (x);
4347 scalar_int_mode int_mode;
4348 if (!is_a <scalar_int_mode> (mode, &int_mode))
4349 return 1;
4350 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4351 }
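
/* Example (illustrative only, assuming 8-bit QImode and 32-bit
   SImode): for X = (sign_extend:SI (reg:QI 100)) the top 25 bits of
   the SImode value are all copies of the QImode sign bit, so the
   function returns at least 32 - 8 + 1 = 25. */
#if 0
  rtx x = gen_rtx_SIGN_EXTEND (SImode, gen_rtx_REG (QImode, 100));
  gcc_checking_assert (num_sign_bit_copies (x, SImode) >= 25);
#endif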
4352
4353 /* Return true if nonzero_bits1 might recurse into both operands
4354 of X. */
4355
4356 static inline bool
4357 nonzero_bits_binary_arith_p (const_rtx x)
4358 {
4359 if (!ARITHMETIC_P (x))
4360 return false;
4361 switch (GET_CODE (x))
4362 {
4363 case AND:
4364 case XOR:
4365 case IOR:
4366 case UMIN:
4367 case UMAX:
4368 case SMIN:
4369 case SMAX:
4370 case PLUS:
4371 case MINUS:
4372 case MULT:
4373 case DIV:
4374 case UDIV:
4375 case MOD:
4376 case UMOD:
4377 return true;
4378 default:
4379 return false;
4380 }
4381 }
4382
4383 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4384 It avoids exponential behavior in nonzero_bits1 when X has
4385 identical subexpressions on the first or the second level. */
4386
4387 static unsigned HOST_WIDE_INT
4388 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4389 machine_mode known_mode,
4390 unsigned HOST_WIDE_INT known_ret)
4391 {
4392 if (x == known_x && mode == known_mode)
4393 return known_ret;
4394
4395 /* Try to find identical subexpressions. If found call
4396 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4397 precomputed value for the subexpression as KNOWN_RET. */
4398
4399 if (nonzero_bits_binary_arith_p (x))
4400 {
4401 rtx x0 = XEXP (x, 0);
4402 rtx x1 = XEXP (x, 1);
4403
4404 /* Check the first level. */
4405 if (x0 == x1)
4406 return nonzero_bits1 (x, mode, x0, mode,
4407 cached_nonzero_bits (x0, mode, known_x,
4408 known_mode, known_ret));
4409
4410 /* Check the second level. */
4411 if (nonzero_bits_binary_arith_p (x0)
4412 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4413 return nonzero_bits1 (x, mode, x1, mode,
4414 cached_nonzero_bits (x1, mode, known_x,
4415 known_mode, known_ret));
4416
4417 if (nonzero_bits_binary_arith_p (x1)
4418 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4419 return nonzero_bits1 (x, mode, x0, mode,
4420 cached_nonzero_bits (x0, mode, known_x,
4421 known_mode, known_ret));
4422 }
4423
4424 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4425 }
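
/* Example (illustrative only): for X = (plus:SI (reg:SI 100)
   (reg:SI 100)) the two operands are the same rtx, so the wrapper
   above computes nonzero_bits for the shared operand once and hands
   the result down as KNOWN_X/KNOWN_RET rather than recursing into it
   twice.  Without this, an expression with identical subexpressions
   at each level would be walked an exponential number of times. */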
4426
4427 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4428 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4429 is less useful. We can't allow both, because that results in exponential
4430 run time recursion. There is a nullstone testcase that triggered
4431 this. This macro avoids accidental uses of num_sign_bit_copies. */
4432 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4433
4434 /* Given an expression, X, compute which bits in X can be nonzero.
4435 We don't care about bits outside of those defined in MODE.
4436
4437 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4438 an arithmetic operation, we can do better. */
4439
4440 static unsigned HOST_WIDE_INT
4441 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4442 machine_mode known_mode,
4443 unsigned HOST_WIDE_INT known_ret)
4444 {
4445 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4446 unsigned HOST_WIDE_INT inner_nz;
4447 enum rtx_code code = GET_CODE (x);
4448 machine_mode inner_mode;
4449 unsigned int inner_width;
4450 scalar_int_mode xmode;
4451
4452 unsigned int mode_width = GET_MODE_PRECISION (mode);
4453
4454 if (CONST_INT_P (x))
4455 {
4456 if (SHORT_IMMEDIATES_SIGN_EXTEND
4457 && INTVAL (x) > 0
4458 && mode_width < BITS_PER_WORD
4459 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4460 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4461
4462 return UINTVAL (x);
4463 }
4464
4465 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4466 return nonzero;
4467 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4468
4469 /* If X is wider than MODE, use its mode instead. */
4470 if (xmode_width > mode_width)
4471 {
4472 mode = xmode;
4473 nonzero = GET_MODE_MASK (mode);
4474 mode_width = xmode_width;
4475 }
4476
4477 if (mode_width > HOST_BITS_PER_WIDE_INT)
4478 /* Our only callers in this case look for single bit values. So
4479 just return the mode mask. Those tests will then be false. */
4480 return nonzero;
4481
4482 /* If MODE is wider than X, but both are a single word for both the host
4483 and target machines, we can compute this from which bits of the object
4484 might be nonzero in its own mode, taking into account the fact that, on
4485 CISC machines, accessing an object in a wider mode generally causes the
4486 high-order bits to become undefined, so they are not known to be zero.
4487 We extend this reasoning to RISC machines for rotate operations since the
4488 semantics of the operations in the larger mode is not well defined. */
4489 if (mode_width > xmode_width
4490 && xmode_width <= BITS_PER_WORD
4491 && xmode_width <= HOST_BITS_PER_WIDE_INT
4492 && (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT))
4493 {
4494 nonzero &= cached_nonzero_bits (x, xmode,
4495 known_x, known_mode, known_ret);
4496 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4497 return nonzero;
4498 }
4499
4500 /* Please keep nonzero_bits_binary_arith_p above in sync with
4501 the code in the switch below. */
4502 switch (code)
4503 {
4504 case REG:
4505 #if defined(POINTERS_EXTEND_UNSIGNED)
4506 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4507 all the bits above ptr_mode are known to be zero. */
4508 /* As we do not know which address space the pointer is referring to,
4509 we can do this only if the target does not support different pointer
4510 or address modes depending on the address space. */
4511 if (target_default_pointer_address_modes_p ()
4512 && POINTERS_EXTEND_UNSIGNED
4513 && xmode == Pmode
4514 && REG_POINTER (x)
4515 && !targetm.have_ptr_extend ())
4516 nonzero &= GET_MODE_MASK (ptr_mode);
4517 #endif
4518
4519 /* Include declared information about alignment of pointers. */
4520 /* ??? We don't properly preserve REG_POINTER changes across
4521 pointer-to-integer casts, so we can't trust it except for
4522 things that we know must be pointers. See execute/960116-1.c. */
4523 if ((x == stack_pointer_rtx
4524 || x == frame_pointer_rtx
4525 || x == arg_pointer_rtx)
4526 && REGNO_POINTER_ALIGN (REGNO (x)))
4527 {
4528 unsigned HOST_WIDE_INT alignment
4529 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4530
4531 #ifdef PUSH_ROUNDING
4532 /* If PUSH_ROUNDING is defined, it is possible for the
4533 stack to be momentarily aligned only to that amount,
4534 so we pick the least alignment. */
4535 if (x == stack_pointer_rtx && PUSH_ARGS)
4536 {
4537 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4538 alignment = MIN (known_alignment (rounded_1), alignment);
4539 }
4540 #endif
4541
4542 nonzero &= ~(alignment - 1);
4543 }
4544
4545 {
4546 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4547 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4548 &nonzero_for_hook);
4549
4550 if (new_rtx)
4551 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4552 known_mode, known_ret);
4553
4554 return nonzero_for_hook;
4555 }
4556
4557 case MEM:
4558 /* In many, if not most, RISC machines, reading a byte from memory
4559 zeros the rest of the register. Noticing that fact saves a lot
4560 of extra zero-extends. */
4561 if (load_extend_op (xmode) == ZERO_EXTEND)
4562 nonzero &= GET_MODE_MASK (xmode);
4563 break;
4564
4565 case EQ: case NE:
4566 case UNEQ: case LTGT:
4567 case GT: case GTU: case UNGT:
4568 case LT: case LTU: case UNLT:
4569 case GE: case GEU: case UNGE:
4570 case LE: case LEU: case UNLE:
4571 case UNORDERED: case ORDERED:
4572 /* If this produces an integer result, we know which bits are set.
4573 Code here used to clear bits outside the mode of X, but that is
4574 now done above. */
4575 /* Mind that MODE is the mode the caller wants to look at this
4576 operation in, and not the actual operation mode. We can wind
4577 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4578 that describes the results of a vector compare. */
4579 if (GET_MODE_CLASS (xmode) == MODE_INT
4580 && mode_width <= HOST_BITS_PER_WIDE_INT)
4581 nonzero = STORE_FLAG_VALUE;
4582 break;
4583
4584 case NEG:
4585 #if 0
4586 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4587 and num_sign_bit_copies. */
4588 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4589 nonzero = 1;
4590 #endif
4591
4592 if (xmode_width < mode_width)
4593 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4594 break;
4595
4596 case ABS:
4597 #if 0
4598 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4599 and num_sign_bit_copies. */
4600 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4601 nonzero = 1;
4602 #endif
4603 break;
4604
4605 case TRUNCATE:
4606 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4607 known_x, known_mode, known_ret)
4608 & GET_MODE_MASK (mode));
4609 break;
4610
4611 case ZERO_EXTEND:
4612 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4613 known_x, known_mode, known_ret);
4614 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4615 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4616 break;
4617
4618 case SIGN_EXTEND:
4619 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4620 Otherwise, show all the bits in the outer mode but not the inner
4621 may be nonzero. */
4622 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4623 known_x, known_mode, known_ret);
4624 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4625 {
4626 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4627 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4628 inner_nz |= (GET_MODE_MASK (mode)
4629 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4630 }
4631
4632 nonzero &= inner_nz;
4633 break;
4634
4635 case AND:
4636 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4637 known_x, known_mode, known_ret)
4638 & cached_nonzero_bits (XEXP (x, 1), mode,
4639 known_x, known_mode, known_ret);
4640 break;
4641
4642 case XOR: case IOR:
4643 case UMIN: case UMAX: case SMIN: case SMAX:
4644 {
4645 unsigned HOST_WIDE_INT nonzero0
4646 = cached_nonzero_bits (XEXP (x, 0), mode,
4647 known_x, known_mode, known_ret);
4648
4649 /* Don't call nonzero_bits for the second time if it cannot change
4650 anything. */
4651 if ((nonzero & nonzero0) != nonzero)
4652 nonzero &= nonzero0
4653 | cached_nonzero_bits (XEXP (x, 1), mode,
4654 known_x, known_mode, known_ret);
4655 }
4656 break;
4657
4658 case PLUS: case MINUS:
4659 case MULT:
4660 case DIV: case UDIV:
4661 case MOD: case UMOD:
4662 /* We can apply the rules of arithmetic to compute the number of
4663 high- and low-order zero bits of these operations. We start by
4664 computing the width (position of the highest-order nonzero bit)
4665 and the number of low-order zero bits for each value. */
4666 {
4667 unsigned HOST_WIDE_INT nz0
4668 = cached_nonzero_bits (XEXP (x, 0), mode,
4669 known_x, known_mode, known_ret);
4670 unsigned HOST_WIDE_INT nz1
4671 = cached_nonzero_bits (XEXP (x, 1), mode,
4672 known_x, known_mode, known_ret);
4673 int sign_index = xmode_width - 1;
4674 int width0 = floor_log2 (nz0) + 1;
4675 int width1 = floor_log2 (nz1) + 1;
4676 int low0 = ctz_or_zero (nz0);
4677 int low1 = ctz_or_zero (nz1);
4678 unsigned HOST_WIDE_INT op0_maybe_minusp
4679 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4680 unsigned HOST_WIDE_INT op1_maybe_minusp
4681 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4682 unsigned int result_width = mode_width;
4683 int result_low = 0;
4684
4685 switch (code)
4686 {
4687 case PLUS:
4688 result_width = MAX (width0, width1) + 1;
4689 result_low = MIN (low0, low1);
4690 break;
4691 case MINUS:
4692 result_low = MIN (low0, low1);
4693 break;
4694 case MULT:
4695 result_width = width0 + width1;
4696 result_low = low0 + low1;
4697 break;
4698 case DIV:
4699 if (width1 == 0)
4700 break;
4701 if (!op0_maybe_minusp && !op1_maybe_minusp)
4702 result_width = width0;
4703 break;
4704 case UDIV:
4705 if (width1 == 0)
4706 break;
4707 result_width = width0;
4708 break;
4709 case MOD:
4710 if (width1 == 0)
4711 break;
4712 if (!op0_maybe_minusp && !op1_maybe_minusp)
4713 result_width = MIN (width0, width1);
4714 result_low = MIN (low0, low1);
4715 break;
4716 case UMOD:
4717 if (width1 == 0)
4718 break;
4719 result_width = MIN (width0, width1);
4720 result_low = MIN (low0, low1);
4721 break;
4722 default:
4723 gcc_unreachable ();
4724 }
4725
4726 if (result_width < mode_width)
4727 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4728
4729 if (result_low > 0)
4730 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4731 }
4732 break;
4733
4734 case ZERO_EXTRACT:
4735 if (CONST_INT_P (XEXP (x, 1))
4736 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4737 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4738 break;
4739
4740 case SUBREG:
4741 /* If this is a SUBREG formed for a promoted variable that has
4742 been zero-extended, we know that at least the high-order bits
4743 are zero, though others might be too. */
4744 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4745 nonzero = GET_MODE_MASK (xmode)
4746 & cached_nonzero_bits (SUBREG_REG (x), xmode,
4747 known_x, known_mode, known_ret);
4748
4749 /* If the inner mode is a single word for both the host and target
4750 machines, we can compute this from which bits of the inner
4751 object might be nonzero. */
4752 inner_mode = GET_MODE (SUBREG_REG (x));
4753 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4754 && inner_width <= BITS_PER_WORD
4755 && inner_width <= HOST_BITS_PER_WIDE_INT)
4756 {
4757 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4758 known_x, known_mode, known_ret);
4759
4760 /* On many CISC machines, accessing an object in a wider mode
4761 causes the high-order bits to become undefined. So they are
4762 not known to be zero. */
4763 rtx_code extend_op;
4764 if ((!WORD_REGISTER_OPERATIONS
4765 /* If this is a typical RISC machine, we only have to worry
4766 about the way loads are extended. */
4767 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4768 ? val_signbit_known_set_p (inner_mode, nonzero)
4769 : extend_op != ZERO_EXTEND)
4770 || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4771 && xmode_width > inner_width)
4772 nonzero
4773 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4774 }
4775 break;
4776
4777 case ASHIFT:
4778 case ASHIFTRT:
4779 case LSHIFTRT:
4780 case ROTATE:
4781 case ROTATERT:
4782 /* The nonzero bits are in two classes: any bits within MODE
4783 that aren't in xmode are always significant. The rest of the
4784 nonzero bits are those that are significant in the operand of
4785 the shift when shifted the appropriate number of bits. This
4786 shows that high-order bits are cleared by the right shift and
4787 low-order bits by left shifts. */
4788 if (CONST_INT_P (XEXP (x, 1))
4789 && INTVAL (XEXP (x, 1)) >= 0
4790 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4791 && INTVAL (XEXP (x, 1)) < xmode_width)
4792 {
4793 int count = INTVAL (XEXP (x, 1));
4794 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4795 unsigned HOST_WIDE_INT op_nonzero
4796 = cached_nonzero_bits (XEXP (x, 0), mode,
4797 known_x, known_mode, known_ret);
4798 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4799 unsigned HOST_WIDE_INT outer = 0;
4800
4801 if (mode_width > xmode_width)
4802 outer = (op_nonzero & nonzero & ~mode_mask);
4803
4804 switch (code)
4805 {
4806 case ASHIFT:
4807 inner <<= count;
4808 break;
4809
4810 case LSHIFTRT:
4811 inner >>= count;
4812 break;
4813
4814 case ASHIFTRT:
4815 inner >>= count;
4816
4817 /* If the sign bit may have been nonzero before the shift, we
4818 need to mark all the places it could have been copied to
4819 by the shift as possibly nonzero. */
4820 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4821 inner |= (((HOST_WIDE_INT_1U << count) - 1)
4822 << (xmode_width - count));
4823 break;
4824
4825 case ROTATE:
4826 inner = (inner << (count % xmode_width)
4827 | (inner >> (xmode_width - (count % xmode_width))))
4828 & mode_mask;
4829 break;
4830
4831 case ROTATERT:
4832 inner = (inner >> (count % xmode_width)
4833 | (inner << (xmode_width - (count % xmode_width))))
4834 & mode_mask;
4835 break;
4836
4837 default:
4838 gcc_unreachable ();
4839 }
4840
4841 nonzero &= (outer | inner);
4842 }
4843 break;
4844
4845 case FFS:
4846 case POPCOUNT:
4847 /* This is at most the number of bits in the mode. */
4848 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4849 break;
4850
4851 case CLZ:
4852 /* If CLZ has a known value at zero, then the nonzero bits are
4853 that value, plus the number of bits in the mode minus one. */
4854 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4855 nonzero
4856 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4857 else
4858 nonzero = -1;
4859 break;
4860
4861 case CTZ:
4862 /* If CTZ has a known value at zero, then the nonzero bits are
4863 that value, plus the number of bits in the mode minus one. */
4864 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4865 nonzero
4866 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4867 else
4868 nonzero = -1;
4869 break;
4870
4871 case CLRSB:
4872 /* This is at most the number of bits in the mode minus 1. */
4873 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4874 break;
4875
4876 case PARITY:
4877 nonzero = 1;
4878 break;
4879
4880 case IF_THEN_ELSE:
4881 {
4882 unsigned HOST_WIDE_INT nonzero_true
4883 = cached_nonzero_bits (XEXP (x, 1), mode,
4884 known_x, known_mode, known_ret);
4885
4886 /* Don't call nonzero_bits for the second time if it cannot change
4887 anything. */
4888 if ((nonzero & nonzero_true) != nonzero)
4889 nonzero &= nonzero_true
4890 | cached_nonzero_bits (XEXP (x, 2), mode,
4891 known_x, known_mode, known_ret);
4892 }
4893 break;
4894
4895 default:
4896 break;
4897 }
4898
4899 return nonzero;
4900 }
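
/* Worked example (illustrative only) of the PLUS rule above: with
   nz0 = 0x0000ff00 (width0 = 16, low0 = 8) and nz1 = 0x000000f0
   (width1 = 8, low1 = 4), we get result_width = MAX (16, 8) + 1 = 17
   and result_low = MIN (8, 4) = 4, so the nonzero bits of the sum
   are confined to 0x0001fff0. */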
4901
4902 /* See the macro definition above. */
4903 #undef cached_num_sign_bit_copies
4904
4905 \f
4906 /* Return true if num_sign_bit_copies1 might recurse into both operands
4907 of X. */
4908
4909 static inline bool
4910 num_sign_bit_copies_binary_arith_p (const_rtx x)
4911 {
4912 if (!ARITHMETIC_P (x))
4913 return false;
4914 switch (GET_CODE (x))
4915 {
4916 case IOR:
4917 case AND:
4918 case XOR:
4919 case SMIN:
4920 case SMAX:
4921 case UMIN:
4922 case UMAX:
4923 case PLUS:
4924 case MINUS:
4925 case MULT:
4926 return true;
4927 default:
4928 return false;
4929 }
4930 }
4931
4932 /* The function cached_num_sign_bit_copies is a wrapper around
4933 num_sign_bit_copies1. It avoids exponential behavior in
4934 num_sign_bit_copies1 when X has identical subexpressions on the
4935 first or the second level. */
4936
4937 static unsigned int
4938 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4939 const_rtx known_x, machine_mode known_mode,
4940 unsigned int known_ret)
4941 {
4942 if (x == known_x && mode == known_mode)
4943 return known_ret;
4944
4945 /* Try to find identical subexpressions. If found call
4946 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4947 the precomputed value for the subexpression as KNOWN_RET. */
4948
4949 if (num_sign_bit_copies_binary_arith_p (x))
4950 {
4951 rtx x0 = XEXP (x, 0);
4952 rtx x1 = XEXP (x, 1);
4953
4954 /* Check the first level. */
4955 if (x0 == x1)
4956 return
4957 num_sign_bit_copies1 (x, mode, x0, mode,
4958 cached_num_sign_bit_copies (x0, mode, known_x,
4959 known_mode,
4960 known_ret));
4961
4962 /* Check the second level. */
4963 if (num_sign_bit_copies_binary_arith_p (x0)
4964 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4965 return
4966 num_sign_bit_copies1 (x, mode, x1, mode,
4967 cached_num_sign_bit_copies (x1, mode, known_x,
4968 known_mode,
4969 known_ret));
4970
4971 if (num_sign_bit_copies_binary_arith_p (x1)
4972 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4973 return
4974 num_sign_bit_copies1 (x, mode, x0, mode,
4975 cached_num_sign_bit_copies (x0, mode, known_x,
4976 known_mode,
4977 known_ret));
4978 }
4979
4980 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4981 }
4982
4983 /* Return the number of bits at the high-order end of X that are known to
4984 be equal to the sign bit. X will be used in mode MODE. The returned
4985 value will always be between 1 and the number of bits in MODE. */
4986
4987 static unsigned int
4988 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4989 machine_mode known_mode,
4990 unsigned int known_ret)
4991 {
4992 enum rtx_code code = GET_CODE (x);
4993 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4994 int num0, num1, result;
4995 unsigned HOST_WIDE_INT nonzero;
4996
4997 if (CONST_INT_P (x))
4998 {
4999 /* If the constant is negative, take its 1's complement and remask.
5000 Then see how many zero bits we have. */
5001 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5002 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5003 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5004 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5005
5006 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5007 }
5008
5009 scalar_int_mode xmode, inner_mode;
5010 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5011 return 1;
5012
5013 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5014
5015 /* For a smaller mode, just ignore the high bits. */
5016 if (bitwidth < xmode_width)
5017 {
5018 num0 = cached_num_sign_bit_copies (x, xmode,
5019 known_x, known_mode, known_ret);
5020 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5021 }
5022
5023 if (bitwidth > xmode_width)
5024 {
5025 /* If this machine does not do all register operations on the entire
5026 register and MODE is wider than the mode of X, we can say nothing
5027 at all about the high-order bits. We extend this reasoning to every
5028 machine for rotate operations since the semantics of the operations
5029 in the larger mode is not well defined. */
5030 if (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT)
5031 return 1;
5032
5033 /* Likewise on machines that do, if the mode of the object is smaller
5034 than a word and loads of that size don't sign extend, we can say
5035 nothing about the high order bits. */
5036 if (xmode_width < BITS_PER_WORD
5037 && load_extend_op (xmode) != SIGN_EXTEND)
5038 return 1;
5039 }
5040
5041 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5042 the code in the switch below. */
5043 switch (code)
5044 {
5045 case REG:
5046
5047 #if defined(POINTERS_EXTEND_UNSIGNED)
5048 /* If pointers extend signed and this is a pointer in Pmode, say that
5049 all the bits above ptr_mode are known to be sign bit copies. */
5050 /* As we do not know which address space the pointer is referring to,
5051 we can do this only if the target does not support different pointer
5052 or address modes depending on the address space. */
5053 if (target_default_pointer_address_modes_p ()
5054 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5055 && mode == Pmode && REG_POINTER (x)
5056 && !targetm.have_ptr_extend ())
5057 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5058 #endif
5059
5060 {
5061 unsigned int copies_for_hook = 1, copies = 1;
5062 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5063 &copies_for_hook);
5064
5065 if (new_rtx)
5066 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5067 known_mode, known_ret);
5068
5069 if (copies > 1 || copies_for_hook > 1)
5070 return MAX (copies, copies_for_hook);
5071
5072 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5073 }
5074 break;
5075
5076 case MEM:
5077 /* Some RISC machines sign-extend all loads of smaller than a word. */
5078 if (load_extend_op (xmode) == SIGN_EXTEND)
5079 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5080 break;
5081
5082 case SUBREG:
5083 /* If this is a SUBREG for a promoted object that is sign-extended
5084 and we are looking at it in a wider mode, we know that at least the
5085 high-order bits are known to be sign bit copies. */
5086
5087 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5088 {
5089 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5090 known_x, known_mode, known_ret);
5091 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5092 }
5093
5094 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5095 {
5096 /* For a smaller object, just ignore the high bits. */
5097 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5098 {
5099 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5100 known_x, known_mode,
5101 known_ret);
5102 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5103 - bitwidth));
5104 }
5105
5106 /* For paradoxical SUBREGs on machines where all register operations
5107 affect the entire register, just look inside. Note that we are
5108 passing MODE to the recursive call, so the number of sign bit
5109 copies will remain relative to that mode, not the inner mode. */
5110
5111 /* This works only if loads sign extend. Otherwise, if we get a
5112 reload for the inner part, it may be loaded from the stack, and
5113 then we lose all sign bit copies that existed before the store
5114 to the stack. */
5115
5116 if (WORD_REGISTER_OPERATIONS
5117 && load_extend_op (inner_mode) == SIGN_EXTEND
5118 && paradoxical_subreg_p (x)
5119 && MEM_P (SUBREG_REG (x)))
5120 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5121 known_x, known_mode, known_ret);
5122 }
5123 break;
5124
5125 case SIGN_EXTRACT:
5126 if (CONST_INT_P (XEXP (x, 1)))
5127 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5128 break;
5129
5130 case SIGN_EXTEND:
5131 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5132 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5133 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5134 known_x, known_mode, known_ret));
5135 break;
5136
5137 case TRUNCATE:
5138 /* For a smaller object, just ignore the high bits. */
5139 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5140 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5141 known_x, known_mode, known_ret);
5142 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5143 - bitwidth)));
5144
5145 case NOT:
5146 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5147 known_x, known_mode, known_ret);
5148
5149 case ROTATE: case ROTATERT:
5150 /* If we are rotating left by a number of bits less than the number
5151 of sign bit copies, we can just subtract that amount from the
5152 number. */
5153 if (CONST_INT_P (XEXP (x, 1))
5154 && INTVAL (XEXP (x, 1)) >= 0
5155 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5156 {
5157 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5158 known_x, known_mode, known_ret);
5159 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5160 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5161 }
5162 break;
5163
5164 case NEG:
5165 /* In general, this subtracts one sign bit copy. But if the value
5166 is known to be positive, the number of sign bit copies is the
5167 same as that of the input. Finally, if the input has just one bit
5168 that might be nonzero, all the bits are copies of the sign bit. */
5169 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5170 known_x, known_mode, known_ret);
5171 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5172 return num0 > 1 ? num0 - 1 : 1;
5173
5174 nonzero = nonzero_bits (XEXP (x, 0), mode);
5175 if (nonzero == 1)
5176 return bitwidth;
5177
5178 if (num0 > 1
5179 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5180 num0--;
5181
5182 return num0;
5183
5184 case IOR: case AND: case XOR:
5185 case SMIN: case SMAX: case UMIN: case UMAX:
5186 /* Logical operations will preserve the number of sign-bit copies.
5187 MIN and MAX operations always return one of the operands. */
5188 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5189 known_x, known_mode, known_ret);
5190 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5191 known_x, known_mode, known_ret);
5192
5193 /* If num1 is clearing some of the top bits then regardless of
5194 the other term, we are guaranteed to have at least that many
5195 high-order zero bits. */
5196 if (code == AND
5197 && num1 > 1
5198 && bitwidth <= HOST_BITS_PER_WIDE_INT
5199 && CONST_INT_P (XEXP (x, 1))
5200 && (UINTVAL (XEXP (x, 1))
5201 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5202 return num1;
5203
5204 /* Similarly for IOR when setting high-order bits. */
5205 if (code == IOR
5206 && num1 > 1
5207 && bitwidth <= HOST_BITS_PER_WIDE_INT
5208 && CONST_INT_P (XEXP (x, 1))
5209 && (UINTVAL (XEXP (x, 1))
5210 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5211 return num1;
5212
5213 return MIN (num0, num1);
5214
5215 case PLUS: case MINUS:
5216 /* For addition and subtraction, we can have a 1-bit carry. However,
5217 if we are subtracting 1 from a positive number, there will not
5218 be such a carry. Furthermore, if the positive number is known to
5219 be 0 or 1, we know the result is either -1 or 0. */
5220
5221 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5222 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5223 {
5224 nonzero = nonzero_bits (XEXP (x, 0), mode);
5225 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5226 return (nonzero == 1 || nonzero == 0 ? bitwidth
5227 : bitwidth - floor_log2 (nonzero) - 1);
5228 }
5229
5230 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5231 known_x, known_mode, known_ret);
5232 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5233 known_x, known_mode, known_ret);
5234 result = MAX (1, MIN (num0, num1) - 1);
5235
5236 return result;
5237
5238 case MULT:
5239 /* The number of bits of the product is the sum of the number of
5240 bits of both terms. However, unless one of the terms is known
5241 to be positive, we must allow for an additional bit since negating
5242 a negative number can remove one sign bit copy. */
5243
5244 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5245 known_x, known_mode, known_ret);
5246 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5247 known_x, known_mode, known_ret);
5248
5249 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5250 if (result > 0
5251 && (bitwidth > HOST_BITS_PER_WIDE_INT
5252 || (((nonzero_bits (XEXP (x, 0), mode)
5253 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5254 && ((nonzero_bits (XEXP (x, 1), mode)
5255 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5256 != 0))))
5257 result--;
5258
5259 return MAX (1, result);
5260
5261 case UDIV:
5262 /* The result must be <= the first operand. If the first operand
5263 has the high bit set, we know nothing about the number of sign
5264 bit copies. */
5265 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5266 return 1;
5267 else if ((nonzero_bits (XEXP (x, 0), mode)
5268 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5269 return 1;
5270 else
5271 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5272 known_x, known_mode, known_ret);
5273
5274 case UMOD:
5275 /* The result must be <= the second operand. If the second operand
5276 has (or just might have) the high bit set, we know nothing about
5277 the number of sign bit copies. */
5278 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5279 return 1;
5280 else if ((nonzero_bits (XEXP (x, 1), mode)
5281 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5282 return 1;
5283 else
5284 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5285 known_x, known_mode, known_ret);
5286
5287 case DIV:
5288 /* Similar to unsigned division, except that we have to worry about
5289 the case where the divisor is negative, in which case we have
5290 to add 1. */
5291 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5292 known_x, known_mode, known_ret);
5293 if (result > 1
5294 && (bitwidth > HOST_BITS_PER_WIDE_INT
5295 || (nonzero_bits (XEXP (x, 1), mode)
5296 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5297 result--;
5298
5299 return result;
5300
5301 case MOD:
5302 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5303 known_x, known_mode, known_ret);
5304 if (result > 1
5305 && (bitwidth > HOST_BITS_PER_WIDE_INT
5306 || (nonzero_bits (XEXP (x, 1), mode)
5307 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5308 result--;
5309
5310 return result;
5311
5312 case ASHIFTRT:
5313 /* Shifts by a constant add to the number of bits equal to the
5314 sign bit. */
5315 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5316 known_x, known_mode, known_ret);
5317 if (CONST_INT_P (XEXP (x, 1))
5318 && INTVAL (XEXP (x, 1)) > 0
5319 && INTVAL (XEXP (x, 1)) < xmode_width)
5320 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5321
5322 return num0;
5323
5324 case ASHIFT:
5325 /* Left shifts destroy copies. */
5326 if (!CONST_INT_P (XEXP (x, 1))
5327 || INTVAL (XEXP (x, 1)) < 0
5328 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5329 || INTVAL (XEXP (x, 1)) >= xmode_width)
5330 return 1;
5331
5332 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5333 known_x, known_mode, known_ret);
5334 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5335
5336 case IF_THEN_ELSE:
5337 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5338 known_x, known_mode, known_ret);
5339 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5340 known_x, known_mode, known_ret);
5341 return MIN (num0, num1);
5342
5343 case EQ: case NE: case GE: case GT: case LE: case LT:
5344 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5345 case GEU: case GTU: case LEU: case LTU:
5346 case UNORDERED: case ORDERED:
5347 /* If STORE_FLAG_VALUE is negative, take its 1's complement and
5348 remask. Then see how many zero bits we have. */
5349 nonzero = STORE_FLAG_VALUE;
5350 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5351 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5352 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5353
5354 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5355
5356 default:
5357 break;
5358 }
5359
5360 /* If we haven't been able to figure it out by one of the above rules,
5361 see if some of the high-order bits are known to be zero. If so,
5362 count those bits and return one less than that amount. If we can't
5363 safely compute the mask for this mode, always return BITWIDTH. */
5364
5365 bitwidth = GET_MODE_PRECISION (mode);
5366 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5367 return 1;
5368
5369 nonzero = nonzero_bits (x, mode);
5370 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5371 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5372 }
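
/* Worked example (illustrative only): for
   X = (ashiftrt:SI (reg:SI 100) (const_int 24)) with 32-bit SImode,
   the ASHIFTRT rule above adds the shift count to the operand's
   copies, giving at least 1 + 24 = 25 sign bit copies -- the same
   guarantee a sign extension from an 8-bit value would provide. */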
5373
5374 /* Calculate the rtx_cost of a single instruction pattern. A return value of
5375 zero indicates an instruction pattern without a known cost. */
5376
5377 int
5378 pattern_cost (rtx pat, bool speed)
5379 {
5380 int i, cost;
5381 rtx set;
5382
5383 /* Extract the single set rtx from the instruction pattern. We
5384 can't use single_set since we only have the pattern. We also
5385 consider PARALLELs of a normal set and a single comparison. In
5386 that case we use the cost of the non-comparison SET operation,
5387 which is most likely to be the real cost of this operation. */
5388 if (GET_CODE (pat) == SET)
5389 set = pat;
5390 else if (GET_CODE (pat) == PARALLEL)
5391 {
5392 set = NULL_RTX;
5393 rtx comparison = NULL_RTX;
5394
5395 for (i = 0; i < XVECLEN (pat, 0); i++)
5396 {
5397 rtx x = XVECEXP (pat, 0, i);
5398 if (GET_CODE (x) == SET)
5399 {
5400 if (GET_CODE (SET_SRC (x)) == COMPARE)
5401 {
5402 if (comparison)
5403 return 0;
5404 comparison = x;
5405 }
5406 else
5407 {
5408 if (set)
5409 return 0;
5410 set = x;
5411 }
5412 }
5413 }
5414
5415 if (!set && comparison)
5416 set = comparison;
5417
5418 if (!set)
5419 return 0;
5420 }
5421 else
5422 return 0;
5423
5424 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5425 return cost > 0 ? cost : COSTS_N_INSNS (1);
5426 }
5427
5428 /* Calculate the cost of a single instruction. A return value of zero
5429 indicates an instruction pattern without a known cost. */
5430
5431 int
5432 insn_cost (rtx_insn *insn, bool speed)
5433 {
5434 if (targetm.insn_cost)
5435 return targetm.insn_cost (insn, speed);
5436
5437 return pattern_cost (PATTERN (insn), speed);
5438 }
5439
5440 /* Returns estimate on cost of computing SEQ. */
5441
5442 unsigned
5443 seq_cost (const rtx_insn *seq, bool speed)
5444 {
5445 unsigned cost = 0;
5446 rtx set;
5447
5448 for (; seq; seq = NEXT_INSN (seq))
5449 {
5450 set = single_set (seq);
5451 if (set)
5452 cost += set_rtx_cost (set, speed);
5453 else if (NONDEBUG_INSN_P (seq))
5454 {
5455 int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5456 if (this_cost > 0)
5457 cost += this_cost;
5458 else
5459 cost++;
5460 }
5461 }
5462
5463 return cost;
5464 }
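
/* Usage sketch (illustrative only; TARGET, SOURCE and MAX_COST are
   hypothetical).  Expanders commonly generate a candidate sequence,
   price it, and only emit it if it is cheap enough: */
#if 0
  start_sequence ();
  emit_move_insn (target, source);
  rtx_insn *seq = get_insns ();
  end_sequence ();
  if (seq_cost (seq, optimize_insn_for_speed_p ()) <= max_cost)
    emit_insn (seq);
#endif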
5465
5466 /* Given an insn INSN and condition COND, return the condition in a
5467 canonical form to simplify testing by callers. Specifically:
5468
5469 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5470 (2) Both operands will be machine operands; (cc0) will have been replaced.
5471 (3) If an operand is a constant, it will be the second operand.
5472 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5473 for GE, GEU, and LEU.
5474
5475 If the condition cannot be understood, or is an inequality floating-point
5476 comparison which needs to be reversed, 0 will be returned.
5477
5478 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5479
5480 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5481 insn used in locating the condition was found. If a replacement test
5482 of the condition is desired, it should be placed in front of that
5483 insn and we will be sure that the inputs are still valid.
5484
5485 If WANT_REG is nonzero, we wish the condition to be relative to that
5486 register, if possible. Therefore, do not canonicalize the condition
5487 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5488 to be a compare to a CC mode register.
5489
5490 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5491 and at INSN. */
5492
5493 rtx
5494 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5495 rtx_insn **earliest,
5496 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5497 {
5498 enum rtx_code code;
5499 rtx_insn *prev = insn;
5500 const_rtx set;
5501 rtx tem;
5502 rtx op0, op1;
5503 int reverse_code = 0;
5504 machine_mode mode;
5505 basic_block bb = BLOCK_FOR_INSN (insn);
5506
5507 code = GET_CODE (cond);
5508 mode = GET_MODE (cond);
5509 op0 = XEXP (cond, 0);
5510 op1 = XEXP (cond, 1);
5511
5512 if (reverse)
5513 code = reversed_comparison_code (cond, insn);
5514 if (code == UNKNOWN)
5515 return 0;
5516
5517 if (earliest)
5518 *earliest = insn;
5519
5520 /* If we are comparing a register with zero, see if the register is set
5521 in the previous insn to a COMPARE or a comparison operation. Perform
5522 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5523 in cse.c */
5524
5525 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5526 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5527 && op1 == CONST0_RTX (GET_MODE (op0))
5528 && op0 != want_reg)
5529 {
5530 /* Set nonzero when we find something of interest. */
5531 rtx x = 0;
5532
5533 /* If comparison with cc0, import actual comparison from compare
5534 insn. */
5535 if (op0 == cc0_rtx)
5536 {
5537 if ((prev = prev_nonnote_insn (prev)) == 0
5538 || !NONJUMP_INSN_P (prev)
5539 || (set = single_set (prev)) == 0
5540 || SET_DEST (set) != cc0_rtx)
5541 return 0;
5542
5543 op0 = SET_SRC (set);
5544 op1 = CONST0_RTX (GET_MODE (op0));
5545 if (earliest)
5546 *earliest = prev;
5547 }
5548
5549 /* If this is a COMPARE, pick up the two things being compared. */
5550 if (GET_CODE (op0) == COMPARE)
5551 {
5552 op1 = XEXP (op0, 1);
5553 op0 = XEXP (op0, 0);
5554 continue;
5555 }
5556 else if (!REG_P (op0))
5557 break;
5558
5559 /* Go back to the previous insn. Stop if it is not an INSN. We also
5560 stop if it isn't a single set or if it has a REG_INC note because
5561 we don't want to bother dealing with it. */
5562
5563 prev = prev_nonnote_nondebug_insn (prev);
5564
5565 if (prev == 0
5566 || !NONJUMP_INSN_P (prev)
5567 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5568 /* In cfglayout mode, there do not have to be labels at the
5569 beginning of a block, or jumps at the end, so the previous
5570 conditions would not stop us when we reach the bb boundary. */
5571 || BLOCK_FOR_INSN (prev) != bb)
5572 break;
5573
5574 set = set_of (op0, prev);
5575
5576 if (set
5577 && (GET_CODE (set) != SET
5578 || !rtx_equal_p (SET_DEST (set), op0)))
5579 break;
5580
5581 /* If this is setting OP0, get what it sets it to if it looks
5582 relevant. */
5583 if (set)
5584 {
5585 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5586 #ifdef FLOAT_STORE_FLAG_VALUE
5587 REAL_VALUE_TYPE fsfv;
5588 #endif
5589
5590 /* ??? We may not combine comparisons done in a CCmode with
5591 comparisons not done in a CCmode. This is to aid targets
5592 like Alpha that have an IEEE compliant EQ instruction, and
5593 a non-IEEE compliant BEQ instruction. The use of CCmode is
5594 actually artificial, simply to prevent the combination, but
5595 should not affect other platforms.
5596
5597 However, we must allow VOIDmode comparisons to match either
5598 CCmode or non-CCmode comparison, because some ports have
5599 modeless comparisons inside branch patterns.
5600
5601 ??? This mode check should perhaps look more like the mode check
5602 in simplify_comparison in combine. */
5603 if (((GET_MODE_CLASS (mode) == MODE_CC)
5604 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5605 && mode != VOIDmode
5606 && inner_mode != VOIDmode)
5607 break;
5608 if (GET_CODE (SET_SRC (set)) == COMPARE
5609 || (((code == NE
5610 || (code == LT
5611 && val_signbit_known_set_p (inner_mode,
5612 STORE_FLAG_VALUE))
5613 #ifdef FLOAT_STORE_FLAG_VALUE
5614 || (code == LT
5615 && SCALAR_FLOAT_MODE_P (inner_mode)
5616 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5617 REAL_VALUE_NEGATIVE (fsfv)))
5618 #endif
5619 ))
5620 && COMPARISON_P (SET_SRC (set))))
5621 x = SET_SRC (set);
5622 else if (((code == EQ
5623 || (code == GE
5624 && val_signbit_known_set_p (inner_mode,
5625 STORE_FLAG_VALUE))
5626 #ifdef FLOAT_STORE_FLAG_VALUE
5627 || (code == GE
5628 && SCALAR_FLOAT_MODE_P (inner_mode)
5629 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5630 REAL_VALUE_NEGATIVE (fsfv)))
5631 #endif
5632 ))
5633 && COMPARISON_P (SET_SRC (set)))
5634 {
5635 reverse_code = 1;
5636 x = SET_SRC (set);
5637 }
5638 else if ((code == EQ || code == NE)
5639 && GET_CODE (SET_SRC (set)) == XOR)
5640 /* Handle sequences like:
5641
5642 (set op0 (xor X Y))
5643 ...(eq|ne op0 (const_int 0))...
5644
5645 in which case:
5646
5647 (eq op0 (const_int 0)) reduces to (eq X Y)
5648 (ne op0 (const_int 0)) reduces to (ne X Y)
5649
5650 This is the form used by MIPS16, for example. */
5651 x = SET_SRC (set);
5652 else
5653 break;
5654 }
5655
5656 else if (reg_set_p (op0, prev))
5657 /* If this sets OP0, but not directly, we have to give up. */
5658 break;
5659
5660 if (x)
5661 {
5662 /* If the caller is expecting the condition to be valid at INSN,
5663 make sure X doesn't change before INSN. */
5664 if (valid_at_insn_p)
5665 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5666 break;
5667 if (COMPARISON_P (x))
5668 code = GET_CODE (x);
5669 if (reverse_code)
5670 {
5671 code = reversed_comparison_code (x, prev);
5672 if (code == UNKNOWN)
5673 return 0;
5674 reverse_code = 0;
5675 }
5676
5677 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5678 if (earliest)
5679 *earliest = prev;
5680 }
5681 }
5682
5683 /* If constant is first, put it last. */
5684 if (CONSTANT_P (op0))
5685 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5686
5687 /* If OP0 is the result of a comparison, we weren't able to find what
5688 was really being compared, so fail. */
5689 if (!allow_cc_mode
5690 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5691 return 0;
5692
5693 /* Canonicalize any ordered comparison with integers involving equality
5694 if we can do computations in the relevant mode and we do not
5695 overflow. */
5696
5697 scalar_int_mode op0_mode;
5698 if (CONST_INT_P (op1)
5699 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5700 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5701 {
5702 HOST_WIDE_INT const_val = INTVAL (op1);
5703 unsigned HOST_WIDE_INT uconst_val = const_val;
5704 unsigned HOST_WIDE_INT max_val
5705 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5706
5707 switch (code)
5708 {
5709 case LE:
5710 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5711 code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5712 break;
5713
5714 /* When cross-compiling, const_val might be sign-extended from
5715 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5716 case GE:
5717 if ((const_val & max_val)
5718 != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5719 code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5720 break;
5721
5722 case LEU:
5723 if (uconst_val < max_val)
5724 code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5725 break;
5726
5727 case GEU:
5728 if (uconst_val != 0)
5729 code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5730 break;
5731
5732 default:
5733 break;
5734 }
5735 }
5736
5737 /* Never return CC0; return zero instead. */
5738 if (CC0_P (op0))
5739 return 0;
5740
5741 /* We promised to return a comparison. */
5742 rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5743 if (COMPARISON_P (ret))
5744 return ret;
5745 return 0;
5746 }
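
/* Example (illustrative only) of rule (4) above: a condition of the
   form (le (reg:SI 100) (const_int 4)) comes back as
   (lt (reg:SI 100) (const_int 5)), so callers only ever need to
   recognize the strict forms LT/LTU/GT/GTU alongside EQ and NE. */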
5747
5748 /* Given a jump insn JUMP, return the condition that will cause it to branch
5749 to its JUMP_LABEL. If the condition cannot be understood, or is an
5750 inequality floating-point comparison which needs to be reversed, 0 will
5751 be returned.
5752
5753 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5754 insn used in locating the condition was found. If a replacement test
5755 of the condition is desired, it should be placed in front of that
5756 insn and we will be sure that the inputs are still valid. If EARLIEST
5757 is null, the returned condition will be valid at INSN.
5758
5759 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5760 compare CC mode register.
5761
5762 VALID_AT_INSN_P is the same as for canonicalize_condition. */
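/* Illustrative (hypothetical insn stream): for a jump compiled from
   "if (a > b) goto L", the usual return value is (gt (reg A) (reg B)),
   with *EARLIEST pointing at the comparison insn that set the
   condition. */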
5763
5764 rtx
5765 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5766 int valid_at_insn_p)
5767 {
5768 rtx cond;
5769 int reverse;
5770 rtx set;
5771
5772 /* If this is not a standard conditional jump, we can't parse it. */
5773 if (!JUMP_P (jump)
5774 || ! any_condjump_p (jump))
5775 return 0;
5776 set = pc_set (jump);
5777
5778 cond = XEXP (SET_SRC (set), 0);
5779
5780 /* If this branches to JUMP_LABEL when the condition is false, reverse
5781 the condition. */
5782 reverse
5783 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5784 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5785
5786 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5787 allow_cc_mode, valid_at_insn_p);
5788 }
5789
5790 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5791 TARGET_MODE_REP_EXTENDED.
5792
5793 Note that we assume that the property of
5794 TARGET_MODE_REP_EXTENDED(B, C) carries over to the integral modes
5795 narrower than mode B. I.e., if A is a mode narrower than B, then in
5796 order to be able to operate on it in mode B, mode A needs to
5797 satisfy the requirements set by the representation of mode B. */
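/* For example (assumed MIPS64-style target): if
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, the loop
   below records num_sign_bit_copies_in_rep[DImode][SImode] == 32,
   i.e. the upper 32 bits of the DImode representation must be copies
   of the SImode sign bit. */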
5798
5799 static void
5800 init_num_sign_bit_copies_in_rep (void)
5801 {
5802 opt_scalar_int_mode in_mode_iter;
5803 scalar_int_mode mode;
5804
5805 FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5806 FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5807 {
5808 scalar_int_mode in_mode = in_mode_iter.require ();
5809 scalar_int_mode i;
5810
5811 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5812 extends to the next widest mode. */
5813 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5814 || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5815
5816 /* We are in in_mode. Count how many bits outside of mode
5817 have to be copies of the sign-bit. */
5818 FOR_EACH_MODE (i, mode, in_mode)
5819 {
5820 /* This must always exist (for the last iteration it will be
5821 IN_MODE). */
5822 scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5823
5824 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5825 /* We can only check sign-bit copies starting from the
5826 top-bit. In order to be able to check the bits we
5827 have already seen we pretend that subsequent bits
5828 have to be sign-bit copies too. */
5829 || num_sign_bit_copies_in_rep [in_mode][mode])
5830 num_sign_bit_copies_in_rep [in_mode][mode]
5831 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5832 }
5833 }
5834 }
5835
5836 /* Suppose that truncation from the machine mode of X to MODE is not a
5837 no-op. See if there is anything special about X so that we can
5838 assume it already contains a truncated value of MODE. */
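/* For example, with the MIPS64-style representation above, a DImode X
   known to have at least 33 sign-bit copies already satisfies the
   SImode representation, so truncation to SImode is free. */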
5839
5840 bool
5841 truncated_to_mode (machine_mode mode, const_rtx x)
5842 {
5843 /* This register has already been used in MODE without explicit
5844 truncation. */
5845 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5846 return true;
5847
5848 /* See if we already satisfy the requirements of MODE. If yes we
5849 can just switch to MODE. */
5850 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5851 && (num_sign_bit_copies (x, GET_MODE (x))
5852 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5853 return true;
5854
5855 return false;
5856 }
5857 \f
5858 /* Return true if RTX code CODE has a single sequence of zero or more
5859 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5860 entry in that case. */
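/* For example, PLUS has format "ee", giving start 0 and count 2;
   IF_THEN_ELSE ("eee") gets count 3, the maximum rtl-iter.h allows;
   PARALLEL ("E") makes this function return false. */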
5861
5862 static bool
5863 setup_reg_subrtx_bounds (unsigned int code)
5864 {
5865 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5866 unsigned int i = 0;
5867 for (; format[i] != 'e'; ++i)
5868 {
5869 if (!format[i])
5870 /* No subrtxes. Leave start and count as 0. */
5871 return true;
5872 if (format[i] == 'E' || format[i] == 'V')
5873 return false;
5874 }
5875
5876 /* Record the sequence of 'e's. */
5877 rtx_all_subrtx_bounds[code].start = i;
5878 do
5879 ++i;
5880 while (format[i] == 'e');
5881 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5882 /* rtl-iter.h relies on this. */
5883 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5884
5885 for (; format[i]; ++i)
5886 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5887 return false;
5888
5889 return true;
5890 }
5891
5892 /* Initialize rtx_all_subrtx_bounds. */
5893 void
5894 init_rtlanal (void)
5895 {
5896 int i;
5897 for (i = 0; i < NUM_RTX_CODE; i++)
5898 {
5899 if (!setup_reg_subrtx_bounds (i))
5900 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5901 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5902 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5903 }
5904
5905 init_num_sign_bit_copies_in_rep ();
5906 }
5907 \f
5908 /* Check whether this is a constant pool constant. */
5909 bool
5910 constant_pool_constant_p (rtx x)
5911 {
5912 x = avoid_constant_pool_reference (x);
5913 return CONST_DOUBLE_P (x);
5914 }
5915 \f
5916 /* If M is a bitmask that selects a field of low-order bits within an item but
5917 not the entire word, return the length of the field. Return -1 otherwise.
5918 M is used in machine mode MODE. */
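/* For example, low_bitmask_len (SImode, 0x7f) is 7, since 0x7f + 1 is
   a power of two, while low_bitmask_len (SImode, 0x70) is -1 because
   the field does not start at bit 0. */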
5919
5920 int
5921 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5922 {
5923 if (mode != VOIDmode)
5924 {
5925 if (!HWI_COMPUTABLE_MODE_P (mode))
5926 return -1;
5927 m &= GET_MODE_MASK (mode);
5928 }
5929
5930 return exact_log2 (m + 1);
5931 }
5932
5933 /* Return the mode of MEM's address. */
5934
5935 scalar_int_mode
5936 get_address_mode (rtx mem)
5937 {
5938 machine_mode mode;
5939
5940 gcc_assert (MEM_P (mem));
5941 mode = GET_MODE (XEXP (mem, 0));
5942 if (mode != VOIDmode)
5943 return as_a <scalar_int_mode> (mode);
5944 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5945 }
5946 \f
5947 /* Split up a CONST_DOUBLE or integer constant rtx
5948 into two rtx's for single words,
5949 storing in *FIRST the word that comes first in memory in the target
5950 and in *SECOND the other.
5951
5952 TODO: This function needs to be rewritten to work on any size
5953 integer. */
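/* Illustrative: on a 32-bit big-endian target with a 64-bit
   HOST_WIDE_INT, splitting (const_int 0x1234567890) gives
   *FIRST == (const_int 0x12) and *SECOND == (const_int 0x34567890). */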
5954
5955 void
5956 split_double (rtx value, rtx *first, rtx *second)
5957 {
5958 if (CONST_INT_P (value))
5959 {
5960 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5961 {
5962 /* In this case the CONST_INT holds both target words.
5963 Extract the bits from it into two word-sized pieces.
5964 Sign extend each half to HOST_WIDE_INT. */
5965 unsigned HOST_WIDE_INT low, high;
5966 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5967 unsigned bits_per_word = BITS_PER_WORD;
5968
5969 /* Set sign_bit to the most significant bit of a word. */
5970 sign_bit = 1;
5971 sign_bit <<= bits_per_word - 1;
5972
5973 /* Set mask so that all bits of the word are set. We could
5974 have used 1 << BITS_PER_WORD instead of basing the
5975 calculation on sign_bit. However, on machines where
5976 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5977 compiler warning, even though the code would never be
5978 executed. */
5979 mask = sign_bit << 1;
5980 mask--;
5981
5982 /* Set sign_extend to all the bits above the low word. */
5983 sign_extend = ~mask;
5984
5985 /* Pick the lower word and sign-extend it. */
5986 low = INTVAL (value);
5987 low &= mask;
5988 if (low & sign_bit)
5989 low |= sign_extend;
5990
5991 /* Pick the higher word, shifted to the least significant
5992 bits, and sign-extend it. */
5993 high = INTVAL (value);
5994 high >>= bits_per_word - 1;
5995 high >>= 1;
5996 high &= mask;
5997 if (high & sign_bit)
5998 high |= sign_extend;
5999
6000 /* Store the words in the target machine order. */
6001 if (WORDS_BIG_ENDIAN)
6002 {
6003 *first = GEN_INT (high);
6004 *second = GEN_INT (low);
6005 }
6006 else
6007 {
6008 *first = GEN_INT (low);
6009 *second = GEN_INT (high);
6010 }
6011 }
6012 else
6013 {
6014 /* The rule for using CONST_INT for a wider mode
6015 is that we regard the value as signed.
6016 So sign-extend it. */
6017 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6018 if (WORDS_BIG_ENDIAN)
6019 {
6020 *first = high;
6021 *second = value;
6022 }
6023 else
6024 {
6025 *first = value;
6026 *second = high;
6027 }
6028 }
6029 }
6030 else if (GET_CODE (value) == CONST_WIDE_INT)
6031 {
6032 /* All of this is scary code and needs to be converted to
6033 properly work with any size integer. */
6034 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6035 if (WORDS_BIG_ENDIAN)
6036 {
6037 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6038 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6039 }
6040 else
6041 {
6042 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6043 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6044 }
6045 }
6046 else if (!CONST_DOUBLE_P (value))
6047 {
6048 if (WORDS_BIG_ENDIAN)
6049 {
6050 *first = const0_rtx;
6051 *second = value;
6052 }
6053 else
6054 {
6055 *first = value;
6056 *second = const0_rtx;
6057 }
6058 }
6059 else if (GET_MODE (value) == VOIDmode
6060 /* This is the old way we did CONST_DOUBLE integers. */
6061 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6062 {
6063 /* In an integer, the words are defined as most and least significant.
6064 So order them by the target's convention. */
6065 if (WORDS_BIG_ENDIAN)
6066 {
6067 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6068 *second = GEN_INT (CONST_DOUBLE_LOW (value));
6069 }
6070 else
6071 {
6072 *first = GEN_INT (CONST_DOUBLE_LOW (value));
6073 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6074 }
6075 }
6076 else
6077 {
6078 long l[2];
6079
6080 /* Note, this converts the REAL_VALUE_TYPE to the target's
6081 format, splits up the floating point double and outputs
6082 exactly 32 bits of it into each of l[0] and l[1] --
6083 not necessarily BITS_PER_WORD bits. */
6084 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6085
6086 /* If 32 bits is an entire word for the target, but not for the host,
6087 then sign-extend on the host so that the number will look the same
6088 way on the host that it would on the target. See for instance
6089 simplify_unary_operation. The #if is needed to avoid compiler
6090 warnings. */
6091
6092 #if HOST_BITS_PER_LONG > 32
6093 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6094 {
6095 if (l[0] & ((long) 1 << 31))
6096 l[0] |= ((unsigned long) (-1) << 32);
6097 if (l[1] & ((long) 1 << 31))
6098 l[1] |= ((unsigned long) (-1) << 32);
6099 }
6100 #endif
6101
6102 *first = GEN_INT (l[0]);
6103 *second = GEN_INT (l[1]);
6104 }
6105 }
6106
6107 /* Return true if X is a sign_extract or zero_extract from the least
6108 significant bit. */
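/* For example, (zero_extract:SI (reg:SI R) (const_int 8) (const_int 0))
   qualifies when !BITS_BIG_ENDIAN; with BITS_BIG_ENDIAN the same
   least-significant field has position 32 - 8 == 24. */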
6109
6110 static bool
6111 lsb_bitfield_op_p (rtx x)
6112 {
6113 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6114 {
6115 machine_mode mode = GET_MODE (XEXP (x, 0));
6116 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6117 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6118 poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6119
6120 return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6121 }
6122 return false;
6123 }
6124
6125 /* Strip outer address "mutations" from LOC and return a pointer to the
6126 inner value. If OUTER_CODE is nonnull, store the code of the innermost
6127 stripped expression there.
6128
6129 "Mutations" either convert between modes or apply some kind of
6130 extension, truncation or alignment. */
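/* For example, given (and:SI (plus:SI (reg:SI R) (const_int 4))
   (const_int -4)), the alignment AND is stripped: the returned pointer
   refers to the PLUS, and *OUTER_CODE (if given) is set to AND. */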
6131
6132 rtx *
6133 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6134 {
6135 for (;;)
6136 {
6137 enum rtx_code code = GET_CODE (*loc);
6138 if (GET_RTX_CLASS (code) == RTX_UNARY)
6139 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6140 used to convert between pointer sizes. */
6141 loc = &XEXP (*loc, 0);
6142 else if (lsb_bitfield_op_p (*loc))
6143 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6144 acts as a combined truncation and extension. */
6145 loc = &XEXP (*loc, 0);
6146 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6147 /* (and ... (const_int -X)) is used to align to X bytes. */
6148 loc = &XEXP (*loc, 0);
6149 else if (code == SUBREG
6150 && !OBJECT_P (SUBREG_REG (*loc))
6151 && subreg_lowpart_p (*loc))
6152 /* (subreg (operator ...) ...) inside an address is used for mode
6153 conversion too. */
6154 loc = &SUBREG_REG (*loc);
6155 else
6156 return loc;
6157 if (outer_code)
6158 *outer_code = code;
6159 }
6160 }
6161
6162 /* Return true if CODE applies some kind of scale. The scaled value is
6163 the first operand and the scale is the second. */
6164
6165 static bool
6166 binary_scale_code_p (enum rtx_code code)
6167 {
6168 return (code == MULT
6169 || code == ASHIFT
6170 /* Needed by ARM targets. */
6171 || code == ASHIFTRT
6172 || code == LSHIFTRT
6173 || code == ROTATE
6174 || code == ROTATERT);
6175 }
6176
6177 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6178 (see address_info). Return null otherwise. */
6179
6180 static rtx *
6181 get_base_term (rtx *inner)
6182 {
6183 if (GET_CODE (*inner) == LO_SUM)
6184 inner = strip_address_mutations (&XEXP (*inner, 0));
6185 if (REG_P (*inner)
6186 || MEM_P (*inner)
6187 || GET_CODE (*inner) == SUBREG
6188 || GET_CODE (*inner) == SCRATCH)
6189 return inner;
6190 return 0;
6191 }
6192
6193 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6194 (see address_info). Return null otherwise. */
6195
6196 static rtx *
6197 get_index_term (rtx *inner)
6198 {
6199 /* At present, only constant scales are allowed. */
6200 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6201 inner = strip_address_mutations (&XEXP (*inner, 0));
6202 if (REG_P (*inner)
6203 || MEM_P (*inner)
6204 || GET_CODE (*inner) == SUBREG
6205 || GET_CODE (*inner) == SCRATCH)
6206 return inner;
6207 return 0;
6208 }
6209
6210 /* Set the segment part of address INFO to LOC, given that INNER is the
6211 unmutated value. */
6212
6213 static void
6214 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6215 {
6216 gcc_assert (!info->segment);
6217 info->segment = loc;
6218 info->segment_term = inner;
6219 }
6220
6221 /* Set the base part of address INFO to LOC, given that INNER is the
6222 unmutated value. */
6223
6224 static void
6225 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6226 {
6227 gcc_assert (!info->base);
6228 info->base = loc;
6229 info->base_term = inner;
6230 }
6231
6232 /* Set the index part of address INFO to LOC, given that INNER is the
6233 unmutated value. */
6234
6235 static void
6236 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6237 {
6238 gcc_assert (!info->index);
6239 info->index = loc;
6240 info->index_term = inner;
6241 }
6242
6243 /* Set the displacement part of address INFO to LOC, given that INNER
6244 is the constant term. */
6245
6246 static void
6247 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6248 {
6249 gcc_assert (!info->disp);
6250 info->disp = loc;
6251 info->disp_term = inner;
6252 }
6253
6254 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6255 rest of INFO accordingly. */
6256
6257 static void
6258 decompose_incdec_address (struct address_info *info)
6259 {
6260 info->autoinc_p = true;
6261
6262 rtx *base = &XEXP (*info->inner, 0);
6263 set_address_base (info, base, base);
6264 gcc_checking_assert (info->base == info->base_term);
6265
6266 /* These addresses are only valid when the size of the addressed
6267 value is known. */
6268 gcc_checking_assert (info->mode != VOIDmode);
6269 }
6270
6271 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6272 of INFO accordingly. */
6273
6274 static void
6275 decompose_automod_address (struct address_info *info)
6276 {
6277 info->autoinc_p = true;
6278
6279 rtx *base = &XEXP (*info->inner, 0);
6280 set_address_base (info, base, base);
6281 gcc_checking_assert (info->base == info->base_term);
6282
6283 rtx plus = XEXP (*info->inner, 1);
6284 gcc_assert (GET_CODE (plus) == PLUS);
6285
6286 info->base_term2 = &XEXP (plus, 0);
6287 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6288
6289 rtx *step = &XEXP (plus, 1);
6290 rtx *inner_step = strip_address_mutations (step);
6291 if (CONSTANT_P (*inner_step))
6292 set_address_disp (info, step, inner_step);
6293 else
6294 set_address_index (info, step, inner_step);
6295 }
6296
6297 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6298 values in [PTR, END). Return a pointer to the end of the used array. */
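/* For example, (plus (plus (reg A) (reg B)) (const_int 8)) stores
   pointers to (reg A), (reg B) and (const_int 8), in that order. */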
6299
6300 static rtx **
6301 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6302 {
6303 rtx x = *loc;
6304 if (GET_CODE (x) == PLUS)
6305 {
6306 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6307 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6308 }
6309 else
6310 {
6311 gcc_assert (ptr != end);
6312 *ptr++ = loc;
6313 }
6314 return ptr;
6315 }
6316
6317 /* Evaluate the likelihood of X being a base or index value, returning
6318 positive if it is likely to be a base, negative if it is likely to be
6319 an index, and 0 if we can't tell. Make the magnitude of the return
6320 value reflect the amount of confidence we have in the answer.
6321
6322 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
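/* For example, a REG_POINTER register scores 2 (confident base), while
   a hard register that is valid only as an index scores -1. */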
6323
6324 static int
6325 baseness (rtx x, machine_mode mode, addr_space_t as,
6326 enum rtx_code outer_code, enum rtx_code index_code)
6327 {
6328 /* Believe *_POINTER unless the address shape requires otherwise. */
6329 if (REG_P (x) && REG_POINTER (x))
6330 return 2;
6331 if (MEM_P (x) && MEM_POINTER (x))
6332 return 2;
6333
6334 if (REG_P (x) && HARD_REGISTER_P (x))
6335 {
6336 /* X is a hard register. If it only fits one of the base
6337 or index classes, choose that interpretation. */
6338 int regno = REGNO (x);
6339 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6340 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6341 if (base_p != index_p)
6342 return base_p ? 1 : -1;
6343 }
6344 return 0;
6345 }
6346
6347 /* INFO->INNER describes a normal, non-automodified address.
6348 Fill in the rest of INFO accordingly. */
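/* For example, (plus (reg B) (plus (mult (reg I) (const_int 4))
   (const_int 8))) is decomposed into base (reg B), index
   (mult (reg I) (const_int 4)) and displacement (const_int 8). */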
6349
6350 static void
6351 decompose_normal_address (struct address_info *info)
6352 {
6353 /* Treat the address as the sum of up to four values. */
6354 rtx *ops[4];
6355 size_t n_ops = extract_plus_operands (info->inner, ops,
6356 ops + ARRAY_SIZE (ops)) - ops;
6357
6358 /* If there is more than one component, any base component is in a PLUS. */
6359 if (n_ops > 1)
6360 info->base_outer_code = PLUS;
6361
6362 /* Try to classify each sum operand now. Leave those that could be
6363 either a base or an index in OPS. */
6364 rtx *inner_ops[4];
6365 size_t out = 0;
6366 for (size_t in = 0; in < n_ops; ++in)
6367 {
6368 rtx *loc = ops[in];
6369 rtx *inner = strip_address_mutations (loc);
6370 if (CONSTANT_P (*inner))
6371 set_address_disp (info, loc, inner);
6372 else if (GET_CODE (*inner) == UNSPEC)
6373 set_address_segment (info, loc, inner);
6374 else
6375 {
6376 /* The only other possibilities are a base or an index. */
6377 rtx *base_term = get_base_term (inner);
6378 rtx *index_term = get_index_term (inner);
6379 gcc_assert (base_term || index_term);
6380 if (!base_term)
6381 set_address_index (info, loc, index_term);
6382 else if (!index_term)
6383 set_address_base (info, loc, base_term);
6384 else
6385 {
6386 gcc_assert (base_term == index_term);
6387 ops[out] = loc;
6388 inner_ops[out] = base_term;
6389 ++out;
6390 }
6391 }
6392 }
6393
6394 /* Classify the remaining OPS members as bases and indexes. */
6395 if (out == 1)
6396 {
6397 /* If we haven't seen a base or an index yet, assume that this is
6398 the base. If we were confident that another term was the base
6399 or index, treat the remaining operand as the other kind. */
6400 if (!info->base)
6401 set_address_base (info, ops[0], inner_ops[0]);
6402 else
6403 set_address_index (info, ops[0], inner_ops[0]);
6404 }
6405 else if (out == 2)
6406 {
6407 /* In the event of a tie, assume the base comes first. */
6408 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6409 GET_CODE (*ops[1]))
6410 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6411 GET_CODE (*ops[0])))
6412 {
6413 set_address_base (info, ops[0], inner_ops[0]);
6414 set_address_index (info, ops[1], inner_ops[1]);
6415 }
6416 else
6417 {
6418 set_address_base (info, ops[1], inner_ops[1]);
6419 set_address_index (info, ops[0], inner_ops[0]);
6420 }
6421 }
6422 else
6423 gcc_assert (out == 0);
6424 }
6425
6426 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6427 or VOIDmode if not known. AS is the address space associated with LOC.
6428 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
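/* A minimal usage sketch (hypothetical caller), using the MEM wrapper
   defined below:

     struct address_info info;
     decompose_mem_address (&info, mem);
     if (info.base)
       ... inspect *info.base_term, the unmutated base ...  */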
6429
6430 void
6431 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6432 addr_space_t as, enum rtx_code outer_code)
6433 {
6434 memset (info, 0, sizeof (*info));
6435 info->mode = mode;
6436 info->as = as;
6437 info->addr_outer_code = outer_code;
6438 info->outer = loc;
6439 info->inner = strip_address_mutations (loc, &outer_code);
6440 info->base_outer_code = outer_code;
6441 switch (GET_CODE (*info->inner))
6442 {
6443 case PRE_DEC:
6444 case PRE_INC:
6445 case POST_DEC:
6446 case POST_INC:
6447 decompose_incdec_address (info);
6448 break;
6449
6450 case PRE_MODIFY:
6451 case POST_MODIFY:
6452 decompose_automod_address (info);
6453 break;
6454
6455 default:
6456 decompose_normal_address (info);
6457 break;
6458 }
6459 }
6460
6461 /* Describe address operand LOC in INFO. */
6462
6463 void
6464 decompose_lea_address (struct address_info *info, rtx *loc)
6465 {
6466 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6467 }
6468
6469 /* Describe the address of MEM X in INFO. */
6470
6471 void
6472 decompose_mem_address (struct address_info *info, rtx x)
6473 {
6474 gcc_assert (MEM_P (x));
6475 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6476 MEM_ADDR_SPACE (x), MEM);
6477 }
6478
6479 /* Update INFO after a change to the address it describes. */
6480
6481 void
6482 update_address (struct address_info *info)
6483 {
6484 decompose_address (info, info->outer, info->mode, info->as,
6485 info->addr_outer_code);
6486 }
6487
6488 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6489 more complicated than that. */
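/* For example, an index of (mult (reg I) (const_int 4)) yields 4, as
   does (ashift (reg I) (const_int 2)); a bare (reg I) yields 1. */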
6490
6491 HOST_WIDE_INT
6492 get_index_scale (const struct address_info *info)
6493 {
6494 rtx index = *info->index;
6495 if (GET_CODE (index) == MULT
6496 && CONST_INT_P (XEXP (index, 1))
6497 && info->index_term == &XEXP (index, 0))
6498 return INTVAL (XEXP (index, 1));
6499
6500 if (GET_CODE (index) == ASHIFT
6501 && CONST_INT_P (XEXP (index, 1))
6502 && info->index_term == &XEXP (index, 0))
6503 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6504
6505 if (info->index == info->index_term)
6506 return 1;
6507
6508 return 0;
6509 }
6510
6511 /* Return the "index code" of INFO, in the form required by
6512 ok_for_base_p_1. */
6513
6514 enum rtx_code
6515 get_index_code (const struct address_info *info)
6516 {
6517 if (info->index)
6518 return GET_CODE (*info->index);
6519
6520 if (info->disp)
6521 return GET_CODE (*info->disp);
6522
6523 return SCRATCH;
6524 }
6525
6526 /* Return true if RTL X contains a SYMBOL_REF. */
6527
6528 bool
6529 contains_symbol_ref_p (const_rtx x)
6530 {
6531 subrtx_iterator::array_type array;
6532 FOR_EACH_SUBRTX (iter, array, x, ALL)
6533 if (SYMBOL_REF_P (*iter))
6534 return true;
6535
6536 return false;
6537 }
6538
6539 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6540
6541 bool
6542 contains_symbolic_reference_p (const_rtx x)
6543 {
6544 subrtx_iterator::array_type array;
6545 FOR_EACH_SUBRTX (iter, array, x, ALL)
6546 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6547 return true;
6548
6549 return false;
6550 }
6551
6552 /* Return true if X contains a thread-local symbol. */
6553
6554 bool
6555 tls_referenced_p (const_rtx x)
6556 {
6557 if (!targetm.have_tls)
6558 return false;
6559
6560 subrtx_iterator::array_type array;
6561 FOR_EACH_SUBRTX (iter, array, x, ALL)
6562 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6563 return true;
6564 return false;
6565 }
6566
6567 /* Return true if reg REGNO with mode REG_MODE would be clobbered by the
6568 clobber_high operand in CLOBBER_HIGH_OP. */
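/* Illustrative (AArch64-style assumption): with CLOBBER_HIGH_OP being
   (reg:TI V0), only the bits above the low 128 are clobbered, so an
   SImode value in V0 survives while a wider vector value does not. */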
6569
6570 bool
6571 reg_is_clobbered_by_clobber_high (unsigned int regno, machine_mode reg_mode,
6572 const_rtx clobber_high_op)
6573 {
6574 unsigned int clobber_regno = REGNO (clobber_high_op);
6575 machine_mode clobber_mode = GET_MODE (clobber_high_op);
6576 unsigned char regno_nregs = hard_regno_nregs (regno, reg_mode);
6577
6578 /* Clobber high should always span exactly one register. */
6579 gcc_assert (REG_NREGS (clobber_high_op) == 1);
6580
6581 /* Clobber high needs to match one of the registers spanned by REGNO. */
6582 if (clobber_regno < regno || clobber_regno >= regno + regno_nregs)
6583 return false;
6584
6585 gcc_assert (reg_mode != BLKmode && clobber_mode != BLKmode);
6586
6587 if (reg_mode == VOIDmode)
6588 return clobber_mode != VOIDmode;
6589
6590 /* Clobber high preserves only the low CLOBBER_MODE part, so it clobbers
6591 REGNO if its per-register size might exceed the size of CLOBBER_MODE. */
6592 return maybe_gt (exact_div (GET_MODE_SIZE (reg_mode), regno_nregs),
6593 GET_MODE_SIZE (clobber_mode));
6594 }