Add a function for getting the ABI of a call insn target
gcc/rtlanal.c
1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38 #include "hard-reg-set.h"
39
40 /* Forward declarations */
41 static void set_of_1 (rtx, const_rtx, void *);
42 static bool covers_regno_p (const_rtx, unsigned int);
43 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
44 static int computed_jump_p_1 (const_rtx);
45 static void parms_set (rtx, const_rtx, void *);
46
47 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
48 const_rtx, machine_mode,
49 unsigned HOST_WIDE_INT);
50 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
51 const_rtx, machine_mode,
52 unsigned HOST_WIDE_INT);
53 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
54 const_rtx, machine_mode,
55 unsigned int);
56 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
57 const_rtx, machine_mode,
58 unsigned int);
59
60 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
61 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
62
63 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
64 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
65 SIGN_EXTEND then while narrowing we also have to enforce the
66 representation and sign-extend the value to mode DESTINATION_REP.
67
68 If the value is already sign-extended to DESTINATION_REP mode we
69 can just switch to DESTINATION mode on it. For each pair of
70 integral modes SOURCE and DESTINATION, when truncating from SOURCE
71 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
72 contains the number of high-order bits in SOURCE that have to be
73 copies of the sign-bit so that we can do this mode-switch to
74 DESTINATION. */
75
76 static unsigned int
77 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
78 \f
79 /* Store X into index I of ARRAY. ARRAY is known to have at least I
80 elements. Return the new base of ARRAY. */
81
82 template <typename T>
83 typename T::value_type *
84 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
85 value_type *base,
86 size_t i, value_type x)
87 {
88 if (base == array.stack)
89 {
90 if (i < LOCAL_ELEMS)
91 {
92 base[i] = x;
93 return base;
94 }
95 gcc_checking_assert (i == LOCAL_ELEMS);
96 /* A previous iteration might also have moved from the stack to the
97 heap, in which case the heap array will already be big enough. */
98 if (vec_safe_length (array.heap) <= i)
99 vec_safe_grow (array.heap, i + 1);
100 base = array.heap->address ();
101 memcpy (base, array.stack, sizeof (array.stack));
102 base[LOCAL_ELEMS] = x;
103 return base;
104 }
105 unsigned int length = array.heap->length ();
106 if (length > i)
107 {
108 gcc_checking_assert (base == array.heap->address ());
109 base[i] = x;
110 return base;
111 }
112 else
113 {
114 gcc_checking_assert (i == length);
115 vec_safe_push (array.heap, x);
116 return array.heap->address ();
117 }
118 }
119
120 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
121 number of elements added to the worklist. */
122
123 template <typename T>
124 size_t
125 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
126 value_type *base,
127 size_t end, rtx_type x)
128 {
129 enum rtx_code code = GET_CODE (x);
130 const char *format = GET_RTX_FORMAT (code);
131 size_t orig_end = end;
132 if (__builtin_expect (INSN_P (x), false))
133 {
134 /* Put the pattern at the top of the queue, since that's what
135 we're likely to want most. It also allows for the SEQUENCE
136 code below. */
137 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
138 if (format[i] == 'e')
139 {
140 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
141 if (__builtin_expect (end < LOCAL_ELEMS, true))
142 base[end++] = subx;
143 else
144 base = add_single_to_queue (array, base, end++, subx);
145 }
146 }
147 else
148 for (int i = 0; format[i]; ++i)
149 if (format[i] == 'e')
150 {
151 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
152 if (__builtin_expect (end < LOCAL_ELEMS, true))
153 base[end++] = subx;
154 else
155 base = add_single_to_queue (array, base, end++, subx);
156 }
157 else if (format[i] == 'E')
158 {
159 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
160 rtx *vec = x->u.fld[i].rt_rtvec->elem;
161 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
162 for (unsigned int j = 0; j < length; j++)
163 base[end++] = T::get_value (vec[j]);
164 else
165 for (unsigned int j = 0; j < length; j++)
166 base = add_single_to_queue (array, base, end++,
167 T::get_value (vec[j]));
168 if (code == SEQUENCE && end == length)
169 /* If the subrtxes of the sequence fill the entire array then
170 we know that no other parts of a containing insn are queued.
171 The caller is therefore iterating over the sequence as a
172 PATTERN (...), so we also want the patterns of the
173 subinstructions. */
174 for (unsigned int j = 0; j < length; j++)
175 {
176 typename T::rtx_type x = T::get_rtx (base[j]);
177 if (INSN_P (x))
178 base[j] = T::get_value (PATTERN (x));
179 }
180 }
181 return end - orig_end;
182 }
183
184 template <typename T>
185 void
186 generic_subrtx_iterator <T>::free_array (array_type &array)
187 {
188 vec_free (array.heap);
189 }
190
191 template <typename T>
192 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
193
194 template class generic_subrtx_iterator <const_rtx_accessor>;
195 template class generic_subrtx_iterator <rtx_var_accessor>;
196 template class generic_subrtx_iterator <rtx_ptr_accessor>;
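/* These iterators are normally used via the FOR_EACH_SUBRTX* macros from
   rtl-iter.h rather than instantiated directly.  A minimal sketch of a
   hypothetical helper (not part of this file):

     static int
     count_regs (const_rtx x)
     {
       int n = 0;
       subrtx_iterator::array_type array;
       FOR_EACH_SUBRTX (iter, array, x, NONCONST)
         if (REG_P (*iter))
           n++;
       return n;
     }

   Inside the loop body, iter.skip_subrtxes () can be used to avoid
   descending into the current subexpression.  */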
197
198 /* Return 1 if the value of X is unstable
199 (would be different at a different point in the program).
200 The frame pointer, arg pointer, etc. are considered stable
201 (within one function) and so is anything marked `unchanging'. */
202
203 int
204 rtx_unstable_p (const_rtx x)
205 {
206 const RTX_CODE code = GET_CODE (x);
207 int i;
208 const char *fmt;
209
210 switch (code)
211 {
212 case MEM:
213 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
214
215 case CONST:
216 CASE_CONST_ANY:
217 case SYMBOL_REF:
218 case LABEL_REF:
219 return 0;
220
221 case REG:
222 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
223 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
224 /* The arg pointer varies if it is not a fixed register. */
225 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
226 return 0;
227 /* ??? When call-clobbered, the value is stable modulo the restore
228 that must happen after a call. This currently screws up local-alloc
229 into believing that the restore is not needed. */
230 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
231 return 0;
232 return 1;
233
234 case ASM_OPERANDS:
235 if (MEM_VOLATILE_P (x))
236 return 1;
237
238 /* Fall through. */
239
240 default:
241 break;
242 }
243
244 fmt = GET_RTX_FORMAT (code);
245 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
246 if (fmt[i] == 'e')
247 {
248 if (rtx_unstable_p (XEXP (x, i)))
249 return 1;
250 }
251 else if (fmt[i] == 'E')
252 {
253 int j;
254 for (j = 0; j < XVECLEN (x, i); j++)
255 if (rtx_unstable_p (XVECEXP (x, i, j)))
256 return 1;
257 }
258
259 return 0;
260 }
261
262 /* Return 1 if X has a value that can vary even between two
263 executions of the program. 0 means X can be compared reliably
264 against certain constants or near-constants.
265 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
266 zero, we are slightly more conservative.
267 The frame pointer and the arg pointer are considered constant. */
268
269 bool
270 rtx_varies_p (const_rtx x, bool for_alias)
271 {
272 RTX_CODE code;
273 int i;
274 const char *fmt;
275
276 if (!x)
277 return 0;
278
279 code = GET_CODE (x);
280 switch (code)
281 {
282 case MEM:
283 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
284
285 case CONST:
286 CASE_CONST_ANY:
287 case SYMBOL_REF:
288 case LABEL_REF:
289 return 0;
290
291 case REG:
292 /* Note that we have to test for the actual rtx used for the frame
293 and arg pointers and not just the register number in case we have
294 eliminated the frame and/or arg pointer and are using it
295 for pseudos. */
296 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
297 /* The arg pointer varies if it is not a fixed register. */
298 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
299 return 0;
300 if (x == pic_offset_table_rtx
301 /* ??? When call-clobbered, the value is stable modulo the restore
302 that must happen after a call. This currently screws up
303 local-alloc into believing that the restore is not needed, so we
304 must return 0 only if we are called from alias analysis. */
305 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
306 return 0;
307 return 1;
308
309 case LO_SUM:
310 /* The operand 0 of a LO_SUM is considered constant
311 (in fact it is related specifically to operand 1)
312 during alias analysis. */
313 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
314 || rtx_varies_p (XEXP (x, 1), for_alias);
315
316 case ASM_OPERANDS:
317 if (MEM_VOLATILE_P (x))
318 return 1;
319
320 /* Fall through. */
321
322 default:
323 break;
324 }
325
326 fmt = GET_RTX_FORMAT (code);
327 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
328 if (fmt[i] == 'e')
329 {
330 if (rtx_varies_p (XEXP (x, i), for_alias))
331 return 1;
332 }
333 else if (fmt[i] == 'E')
334 {
335 int j;
336 for (j = 0; j < XVECLEN (x, i); j++)
337 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
338 return 1;
339 }
340
341 return 0;
342 }
343
344 /* Compute an approximation for the offset between the register
345 FROM and TO for the current function, as it was at the start
346 of the routine. */
347
348 static poly_int64
349 get_initial_register_offset (int from, int to)
350 {
351 static const struct elim_table_t
352 {
353 const int from;
354 const int to;
355 } table[] = ELIMINABLE_REGS;
356 poly_int64 offset1, offset2;
357 unsigned int i, j;
358
359 if (to == from)
360 return 0;
361
362 /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
363 is completed, but we need to give at least an estimate for the stack
364 pointer based on the frame size. */
365 if (!epilogue_completed)
366 {
367 offset1 = crtl->outgoing_args_size + get_frame_size ();
368 #if !STACK_GROWS_DOWNWARD
369 offset1 = - offset1;
370 #endif
371 if (to == STACK_POINTER_REGNUM)
372 return offset1;
373 else if (from == STACK_POINTER_REGNUM)
374 return - offset1;
375 else
376 return 0;
377 }
378
379 for (i = 0; i < ARRAY_SIZE (table); i++)
380 if (table[i].from == from)
381 {
382 if (table[i].to == to)
383 {
384 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
385 offset1);
386 return offset1;
387 }
388 for (j = 0; j < ARRAY_SIZE (table); j++)
389 {
390 if (table[j].to == to
391 && table[j].from == table[i].to)
392 {
393 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
394 offset1);
395 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
396 offset2);
397 return offset1 + offset2;
398 }
399 if (table[j].from == to
400 && table[j].to == table[i].to)
401 {
402 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
403 offset1);
404 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
405 offset2);
406 return offset1 - offset2;
407 }
408 }
409 }
410 else if (table[i].to == from)
411 {
412 if (table[i].from == to)
413 {
414 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
415 offset1);
416 return - offset1;
417 }
418 for (j = 0; j < ARRAY_SIZE (table); j++)
419 {
420 if (table[j].to == to
421 && table[j].from == table[i].from)
422 {
423 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
424 offset1);
425 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
426 offset2);
427 return - offset1 + offset2;
428 }
429 if (table[j].from == to
430 && table[j].to == table[i].from)
431 {
432 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
433 offset1);
434 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
435 offset2);
436 return - offset1 - offset2;
437 }
438 }
439 }
440
441 /* If the requested register combination was not found,
442 try a different, simpler combination. */
443 if (from == ARG_POINTER_REGNUM)
444 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
445 else if (to == ARG_POINTER_REGNUM)
446 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
447 else if (from == HARD_FRAME_POINTER_REGNUM)
448 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
449 else if (to == HARD_FRAME_POINTER_REGNUM)
450 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
451 else
452 return 0;
453 }
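/* As an illustration (assuming a target whose ELIMINABLE_REGS contains
   (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM) and
   (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM) but no direct entry from
   the arg pointer to the frame pointer): the offset from ARG_POINTER to
   FRAME_POINTER is derived above as
   INITIAL_ELIMINATION_OFFSET (AP, SP) - INITIAL_ELIMINATION_OFFSET (FP, SP).  */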
454
455 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
456 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
457 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
458 references on strict alignment machines. */
459
460 static int
461 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
462 machine_mode mode, bool unaligned_mems)
463 {
464 enum rtx_code code = GET_CODE (x);
465 gcc_checking_assert (mode == BLKmode || known_size_p (size));
466 poly_int64 const_x1;
467
468 /* The offset must be a multiple of the mode size if we are considering
469 unaligned memory references on strict alignment machines. */
470 if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
471 {
472 poly_int64 actual_offset = offset;
473
474 #ifdef SPARC_STACK_BOUNDARY_HACK
475 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
476 the real alignment of %sp. However, when it does this, the
477 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
478 if (SPARC_STACK_BOUNDARY_HACK
479 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
480 actual_offset -= STACK_POINTER_OFFSET;
481 #endif
482
483 if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
484 return 1;
485 }
486
487 switch (code)
488 {
489 case SYMBOL_REF:
490 if (SYMBOL_REF_WEAK (x))
491 return 1;
492 if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
493 {
494 tree decl;
495 poly_int64 decl_size;
496
497 if (maybe_lt (offset, 0))
498 return 1;
499 if (!known_size_p (size))
500 return maybe_ne (offset, 0);
501
502 /* If the size of the access or of the symbol is unknown,
503 assume the worst. */
504 decl = SYMBOL_REF_DECL (x);
505
506 /* Else check that the access is in bounds. TODO: restructure
507 expr_size/tree_expr_size/int_expr_size and just use the latter. */
508 if (!decl)
509 decl_size = -1;
510 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
511 {
512 if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
513 decl_size = -1;
514 }
515 else if (TREE_CODE (decl) == STRING_CST)
516 decl_size = TREE_STRING_LENGTH (decl);
517 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
518 decl_size = int_size_in_bytes (TREE_TYPE (decl));
519 else
520 decl_size = -1;
521
522 return (!known_size_p (decl_size) || known_eq (decl_size, 0)
523 ? maybe_ne (offset, 0)
524 : !known_subrange_p (offset, size, 0, decl_size));
525 }
526
527 return 0;
528
529 case LABEL_REF:
530 return 0;
531
532 case REG:
533 /* Stack references are assumed not to trap, but we need to deal with
534 nonsensical offsets. */
535 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
536 || x == stack_pointer_rtx
537 /* The arg pointer varies if it is not a fixed register. */
538 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
539 {
540 #ifdef RED_ZONE_SIZE
541 poly_int64 red_zone_size = RED_ZONE_SIZE;
542 #else
543 poly_int64 red_zone_size = 0;
544 #endif
545 poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
546 poly_int64 low_bound, high_bound;
547
548 if (!known_size_p (size))
549 return 1;
550
551 if (x == frame_pointer_rtx)
552 {
553 if (FRAME_GROWS_DOWNWARD)
554 {
555 high_bound = targetm.starting_frame_offset ();
556 low_bound = high_bound - get_frame_size ();
557 }
558 else
559 {
560 low_bound = targetm.starting_frame_offset ();
561 high_bound = low_bound + get_frame_size ();
562 }
563 }
564 else if (x == hard_frame_pointer_rtx)
565 {
566 poly_int64 sp_offset
567 = get_initial_register_offset (STACK_POINTER_REGNUM,
568 HARD_FRAME_POINTER_REGNUM);
569 poly_int64 ap_offset
570 = get_initial_register_offset (ARG_POINTER_REGNUM,
571 HARD_FRAME_POINTER_REGNUM);
572
573 #if STACK_GROWS_DOWNWARD
574 low_bound = sp_offset - red_zone_size - stack_boundary;
575 high_bound = ap_offset
576 + FIRST_PARM_OFFSET (current_function_decl)
577 #if !ARGS_GROW_DOWNWARD
578 + crtl->args.size
579 #endif
580 + stack_boundary;
581 #else
582 high_bound = sp_offset + red_zone_size + stack_boundary;
583 low_bound = ap_offset
584 + FIRST_PARM_OFFSET (current_function_decl)
585 #if ARGS_GROW_DOWNWARD
586 - crtl->args.size
587 #endif
588 - stack_boundary;
589 #endif
590 }
591 else if (x == stack_pointer_rtx)
592 {
593 poly_int64 ap_offset
594 = get_initial_register_offset (ARG_POINTER_REGNUM,
595 STACK_POINTER_REGNUM);
596
597 #if STACK_GROWS_DOWNWARD
598 low_bound = - red_zone_size - stack_boundary;
599 high_bound = ap_offset
600 + FIRST_PARM_OFFSET (current_function_decl)
601 #if !ARGS_GROW_DOWNWARD
602 + crtl->args.size
603 #endif
604 + stack_boundary;
605 #else
606 high_bound = red_zone_size + stack_boundary;
607 low_bound = ap_offset
608 + FIRST_PARM_OFFSET (current_function_decl)
609 #if ARGS_GROW_DOWNWARD
610 - crtl->args.size
611 #endif
612 - stack_boundary;
613 #endif
614 }
615 else
616 {
617 /* We assume that accesses are safe to at least the
618 next stack boundary.
619 Examples are varargs and __builtin_return_address. */
620 #if ARGS_GROW_DOWNWARD
621 high_bound = FIRST_PARM_OFFSET (current_function_decl)
622 + stack_boundary;
623 low_bound = FIRST_PARM_OFFSET (current_function_decl)
624 - crtl->args.size - stack_boundary;
625 #else
626 low_bound = FIRST_PARM_OFFSET (current_function_decl)
627 - stack_boundary;
628 high_bound = FIRST_PARM_OFFSET (current_function_decl)
629 + crtl->args.size + stack_boundary;
630 #endif
631 }
632
633 if (known_ge (offset, low_bound)
634 && known_le (offset, high_bound - size))
635 return 0;
636 return 1;
637 }
638 /* All of the virtual frame registers are stack references. */
639 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
640 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
641 return 0;
642 return 1;
643
644 case CONST:
645 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
646 mode, unaligned_mems);
647
648 case PLUS:
649 /* An address is assumed not to trap if:
650 - it is the pic register plus a const unspec without offset. */
651 if (XEXP (x, 0) == pic_offset_table_rtx
652 && GET_CODE (XEXP (x, 1)) == CONST
653 && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
654 && known_eq (offset, 0))
655 return 0;
656
657 /* - or it is an address that can't trap plus a constant integer. */
658 if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
659 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
660 size, mode, unaligned_mems))
661 return 0;
662
663 return 1;
664
665 case LO_SUM:
666 case PRE_MODIFY:
667 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
668 mode, unaligned_mems);
669
670 case PRE_DEC:
671 case PRE_INC:
672 case POST_DEC:
673 case POST_INC:
674 case POST_MODIFY:
675 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
676 mode, unaligned_mems);
677
678 default:
679 break;
680 }
681
682 /* If it isn't one of the cases above, it can cause a trap. */
683 return 1;
684 }
685
686 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
687
688 int
689 rtx_addr_can_trap_p (const_rtx x)
690 {
691 return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
692 }
693
694 /* Return true if X contains a MEM subrtx. */
695
696 bool
697 contains_mem_rtx_p (rtx x)
698 {
699 subrtx_iterator::array_type array;
700 FOR_EACH_SUBRTX (iter, array, x, ALL)
701 if (MEM_P (*iter))
702 return true;
703
704 return false;
705 }
706
707 /* Return true if X is an address that is known to not be zero. */
708
709 bool
710 nonzero_address_p (const_rtx x)
711 {
712 const enum rtx_code code = GET_CODE (x);
713
714 switch (code)
715 {
716 case SYMBOL_REF:
717 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
718
719 case LABEL_REF:
720 return true;
721
722 case REG:
723 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
724 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
725 || x == stack_pointer_rtx
726 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
727 return true;
728 /* All of the virtual frame registers are stack references. */
729 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
730 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
731 return true;
732 return false;
733
734 case CONST:
735 return nonzero_address_p (XEXP (x, 0));
736
737 case PLUS:
738 /* Handle PIC references. */
739 if (XEXP (x, 0) == pic_offset_table_rtx
740 && CONSTANT_P (XEXP (x, 1)))
741 return true;
742 return false;
743
744 case PRE_MODIFY:
745 /* Similar to the above; allow positive offsets. Further, since
746 auto-inc is only allowed in memories, the register must be a
747 pointer. */
748 if (CONST_INT_P (XEXP (x, 1))
749 && INTVAL (XEXP (x, 1)) > 0)
750 return true;
751 return nonzero_address_p (XEXP (x, 0));
752
753 case PRE_INC:
754 /* Similarly. Further, the offset is always positive. */
755 return true;
756
757 case PRE_DEC:
758 case POST_DEC:
759 case POST_INC:
760 case POST_MODIFY:
761 return nonzero_address_p (XEXP (x, 0));
762
763 case LO_SUM:
764 return nonzero_address_p (XEXP (x, 1));
765
766 default:
767 break;
768 }
769
770 /* If it isn't one of the cases above, it might be zero. */
771 return false;
772 }
773
774 /* Return 1 if X refers to a memory location whose address
775 cannot be compared reliably with constant addresses,
776 or if X refers to a BLKmode memory object.
777 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
778 zero, we are slightly more conservative. */
779
780 bool
781 rtx_addr_varies_p (const_rtx x, bool for_alias)
782 {
783 enum rtx_code code;
784 int i;
785 const char *fmt;
786
787 if (x == 0)
788 return 0;
789
790 code = GET_CODE (x);
791 if (code == MEM)
792 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
793
794 fmt = GET_RTX_FORMAT (code);
795 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
796 if (fmt[i] == 'e')
797 {
798 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
799 return 1;
800 }
801 else if (fmt[i] == 'E')
802 {
803 int j;
804 for (j = 0; j < XVECLEN (x, i); j++)
805 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
806 return 1;
807 }
808 return 0;
809 }
810 \f
811 /* Return the CALL in INSN if there is one. */
812
813 rtx
814 get_call_rtx_from (const rtx_insn *insn)
815 {
816 rtx x = PATTERN (insn);
817 if (GET_CODE (x) == PARALLEL)
818 x = XVECEXP (x, 0, 0);
819 if (GET_CODE (x) == SET)
820 x = SET_SRC (x);
821 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
822 return x;
823 return NULL_RTX;
824 }
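/* For illustration, on many targets a call insn pattern has one of these
   shapes (the MEM mode and argument-size operand vary by target):

     (call (mem:QI (symbol_ref "foo")) (const_int 0))
     (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0)))

   possibly wrapped in a PARALLEL together with clobbers, which is why
   get_call_rtx_from looks through a PARALLEL and a SET before testing
   for the CALL itself.  */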
825
826 /* Get the declaration of the function called by INSN. */
827
828 tree
829 get_call_fndecl (const rtx_insn *insn)
830 {
831 rtx note, datum;
832
833 note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
834 if (note == NULL_RTX)
835 return NULL_TREE;
836
837 datum = XEXP (note, 0);
838 if (datum != NULL_RTX)
839 return SYMBOL_REF_DECL (datum);
840
841 return NULL_TREE;
842 }
843 \f
844 /* Return the value of the integer term in X, if one is apparent;
845 otherwise return 0.
846 Only obvious integer terms are detected.
847 This is used in cse.c with the `related_value' field. */
848
849 HOST_WIDE_INT
850 get_integer_term (const_rtx x)
851 {
852 if (GET_CODE (x) == CONST)
853 x = XEXP (x, 0);
854
855 if (GET_CODE (x) == MINUS
856 && CONST_INT_P (XEXP (x, 1)))
857 return - INTVAL (XEXP (x, 1));
858 if (GET_CODE (x) == PLUS
859 && CONST_INT_P (XEXP (x, 1)))
860 return INTVAL (XEXP (x, 1));
861 return 0;
862 }
863
864 /* If X is a constant, return the value sans apparent integer term;
865 otherwise return 0.
866 Only obvious integer terms are detected. */
867
868 rtx
869 get_related_value (const_rtx x)
870 {
871 if (GET_CODE (x) != CONST)
872 return 0;
873 x = XEXP (x, 0);
874 if (GET_CODE (x) == PLUS
875 && CONST_INT_P (XEXP (x, 1)))
876 return XEXP (x, 0);
877 else if (GET_CODE (x) == MINUS
878 && CONST_INT_P (XEXP (x, 1)))
879 return XEXP (x, 0);
880 return 0;
881 }
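/* For example, given (const (plus (symbol_ref "x") (const_int 8))),
   get_integer_term returns 8 and get_related_value returns the
   (symbol_ref "x"); when no obvious integer term is present, both
   return 0.  */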
882 \f
883 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
884 to somewhere in the same object or object_block as SYMBOL. */
885
886 bool
887 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
888 {
889 tree decl;
890
891 if (GET_CODE (symbol) != SYMBOL_REF)
892 return false;
893
894 if (offset == 0)
895 return true;
896
897 if (offset > 0)
898 {
899 if (CONSTANT_POOL_ADDRESS_P (symbol)
900 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
901 return true;
902
903 decl = SYMBOL_REF_DECL (symbol);
904 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
905 return true;
906 }
907
908 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
909 && SYMBOL_REF_BLOCK (symbol)
910 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
911 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
912 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
913 return true;
914
915 return false;
916 }
917
918 /* Split X into a base and a constant offset, storing them in *BASE_OUT
919 and *OFFSET_OUT respectively. */
920
921 void
922 split_const (rtx x, rtx *base_out, rtx *offset_out)
923 {
924 if (GET_CODE (x) == CONST)
925 {
926 x = XEXP (x, 0);
927 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
928 {
929 *base_out = XEXP (x, 0);
930 *offset_out = XEXP (x, 1);
931 return;
932 }
933 }
934 *base_out = x;
935 *offset_out = const0_rtx;
936 }
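/* For example, applied to (const (plus (symbol_ref "x") (const_int 12)))
   this stores the (symbol_ref "x") in *BASE_OUT and (const_int 12) in
   *OFFSET_OUT; otherwise the base is X (with any outer CONST stripped)
   and the offset is const0_rtx.  */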
937
938 /* Express integer value X as some value Y plus a polynomial offset,
939 where Y is either const0_rtx, X or something within X (as opposed
940 to a new rtx). Return the Y and store the offset in *OFFSET_OUT. */
941
942 rtx
943 strip_offset (rtx x, poly_int64_pod *offset_out)
944 {
945 rtx base = const0_rtx;
946 rtx test = x;
947 if (GET_CODE (test) == CONST)
948 test = XEXP (test, 0);
949 if (GET_CODE (test) == PLUS)
950 {
951 base = XEXP (test, 0);
952 test = XEXP (test, 1);
953 }
954 if (poly_int_rtx_p (test, offset_out))
955 return base;
956 *offset_out = 0;
957 return x;
958 }
959
960 /* Return the argument size in REG_ARGS_SIZE note X. */
961
962 poly_int64
963 get_args_size (const_rtx x)
964 {
965 gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
966 return rtx_to_poly_int64 (XEXP (x, 0));
967 }
968 \f
969 /* Return the number of places FIND appears within X. If COUNT_DEST is
970 zero, we do not count occurrences inside the destination of a SET. */
971
972 int
973 count_occurrences (const_rtx x, const_rtx find, int count_dest)
974 {
975 int i, j;
976 enum rtx_code code;
977 const char *format_ptr;
978 int count;
979
980 if (x == find)
981 return 1;
982
983 code = GET_CODE (x);
984
985 switch (code)
986 {
987 case REG:
988 CASE_CONST_ANY:
989 case SYMBOL_REF:
990 case CODE_LABEL:
991 case PC:
992 case CC0:
993 return 0;
994
995 case EXPR_LIST:
996 count = count_occurrences (XEXP (x, 0), find, count_dest);
997 if (XEXP (x, 1))
998 count += count_occurrences (XEXP (x, 1), find, count_dest);
999 return count;
1000
1001 case MEM:
1002 if (MEM_P (find) && rtx_equal_p (x, find))
1003 return 1;
1004 break;
1005
1006 case SET:
1007 if (SET_DEST (x) == find && ! count_dest)
1008 return count_occurrences (SET_SRC (x), find, count_dest);
1009 break;
1010
1011 default:
1012 break;
1013 }
1014
1015 format_ptr = GET_RTX_FORMAT (code);
1016 count = 0;
1017
1018 for (i = 0; i < GET_RTX_LENGTH (code); i++)
1019 {
1020 switch (*format_ptr++)
1021 {
1022 case 'e':
1023 count += count_occurrences (XEXP (x, i), find, count_dest);
1024 break;
1025
1026 case 'E':
1027 for (j = 0; j < XVECLEN (x, i); j++)
1028 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1029 break;
1030 }
1031 }
1032 return count;
1033 }
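/* A typical use (illustrative) is counting how many times a register is
   used inside an insn pattern:

     int uses = count_occurrences (PATTERN (insn), reg, 0);

   With COUNT_DEST == 0, a SET whose destination is FIND itself
   contributes only the occurrences in its source, so for
   (set (reg A) (plus (reg A) (const_int 1))) and FIND == (reg A) the
   result would be 1 rather than 2.  */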
1034
1035 \f
1036 /* Return TRUE if OP is a register or subreg of a register that
1037 holds an unsigned quantity. Otherwise, return FALSE. */
1038
1039 bool
1040 unsigned_reg_p (rtx op)
1041 {
1042 if (REG_P (op)
1043 && REG_EXPR (op)
1044 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1045 return true;
1046
1047 if (GET_CODE (op) == SUBREG
1048 && SUBREG_PROMOTED_SIGN (op))
1049 return true;
1050
1051 return false;
1052 }
1053
1054 \f
1055 /* Nonzero if register REG appears somewhere within IN.
1056 Also works if REG is not a register; in this case it checks
1057 for a subexpression of IN that is Lisp "equal" to REG. */
1058
1059 int
1060 reg_mentioned_p (const_rtx reg, const_rtx in)
1061 {
1062 const char *fmt;
1063 int i;
1064 enum rtx_code code;
1065
1066 if (in == 0)
1067 return 0;
1068
1069 if (reg == in)
1070 return 1;
1071
1072 if (GET_CODE (in) == LABEL_REF)
1073 return reg == label_ref_label (in);
1074
1075 code = GET_CODE (in);
1076
1077 switch (code)
1078 {
1079 /* Compare registers by number. */
1080 case REG:
1081 return REG_P (reg) && REGNO (in) == REGNO (reg);
1082
1083 /* These codes have no constituent expressions
1084 and are unique. */
1085 case SCRATCH:
1086 case CC0:
1087 case PC:
1088 return 0;
1089
1090 CASE_CONST_ANY:
1091 /* These are kept unique for a given value. */
1092 return 0;
1093
1094 default:
1095 break;
1096 }
1097
1098 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1099 return 1;
1100
1101 fmt = GET_RTX_FORMAT (code);
1102
1103 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1104 {
1105 if (fmt[i] == 'E')
1106 {
1107 int j;
1108 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1109 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1110 return 1;
1111 }
1112 else if (fmt[i] == 'e'
1113 && reg_mentioned_p (reg, XEXP (in, i)))
1114 return 1;
1115 }
1116 return 0;
1117 }
1118 \f
1119 /* Return 1 if there is no CODE_LABEL insn between BEG and END,
1120 exclusive of BEG and END. */
1121
1122 int
1123 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1124 {
1125 rtx_insn *p;
1126 if (beg == end)
1127 return 0;
1128 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1129 if (LABEL_P (p))
1130 return 0;
1131 return 1;
1132 }
1133
1134 /* Nonzero if register REG is used in an insn between
1135 FROM_INSN and TO_INSN (exclusive of those two). */
1136
1137 int
1138 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1139 const rtx_insn *to_insn)
1140 {
1141 rtx_insn *insn;
1142
1143 if (from_insn == to_insn)
1144 return 0;
1145
1146 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1147 if (NONDEBUG_INSN_P (insn)
1148 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1149 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1150 return 1;
1151 return 0;
1152 }
1153 \f
1154 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1155 is entirely replaced by a new value and the only use is as a SET_DEST,
1156 we do not consider it a reference. */
1157
1158 int
1159 reg_referenced_p (const_rtx x, const_rtx body)
1160 {
1161 int i;
1162
1163 switch (GET_CODE (body))
1164 {
1165 case SET:
1166 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1167 return 1;
1168
1169 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1170 of a REG that occupies all of the REG, the insn references X if
1171 it is mentioned in the destination. */
1172 if (GET_CODE (SET_DEST (body)) != CC0
1173 && GET_CODE (SET_DEST (body)) != PC
1174 && !REG_P (SET_DEST (body))
1175 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1176 && REG_P (SUBREG_REG (SET_DEST (body)))
1177 && !read_modify_subreg_p (SET_DEST (body)))
1178 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1179 return 1;
1180 return 0;
1181
1182 case ASM_OPERANDS:
1183 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1184 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1185 return 1;
1186 return 0;
1187
1188 case CALL:
1189 case USE:
1190 case IF_THEN_ELSE:
1191 return reg_overlap_mentioned_p (x, body);
1192
1193 case TRAP_IF:
1194 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1195
1196 case PREFETCH:
1197 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1198
1199 case UNSPEC:
1200 case UNSPEC_VOLATILE:
1201 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1202 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1203 return 1;
1204 return 0;
1205
1206 case PARALLEL:
1207 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1208 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1209 return 1;
1210 return 0;
1211
1212 case CLOBBER:
1213 if (MEM_P (XEXP (body, 0)))
1214 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1215 return 1;
1216 return 0;
1217
1218 case CLOBBER_HIGH:
1219 gcc_assert (REG_P (XEXP (body, 0)));
1220 return 0;
1221
1222 case COND_EXEC:
1223 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1224 return 1;
1225 return reg_referenced_p (x, COND_EXEC_CODE (body));
1226
1227 default:
1228 return 0;
1229 }
1230 }
1231 \f
1232 /* Nonzero if register REG is set or clobbered in an insn between
1233 FROM_INSN and TO_INSN (exclusive of those two). */
1234
1235 int
1236 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1237 const rtx_insn *to_insn)
1238 {
1239 const rtx_insn *insn;
1240
1241 if (from_insn == to_insn)
1242 return 0;
1243
1244 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1245 if (INSN_P (insn) && reg_set_p (reg, insn))
1246 return 1;
1247 return 0;
1248 }
1249
1250 /* Return true if REG is set or clobbered inside INSN. */
1251
1252 int
1253 reg_set_p (const_rtx reg, const_rtx insn)
1254 {
1255 /* After delay slot handling, call and branch insns might be in a
1256 sequence. Check all the elements there. */
1257 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1258 {
1259 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1260 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1261 return true;
1262
1263 return false;
1264 }
1265
1266 /* We can be passed an insn or part of one. If we are passed an insn,
1267 check if a side-effect of the insn clobbers REG. */
1268 if (INSN_P (insn)
1269 && (FIND_REG_INC_NOTE (insn, reg)
1270 || (CALL_P (insn)
1271 && ((REG_P (reg)
1272 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1273 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1274 GET_MODE (reg), REGNO (reg)))
1275 || MEM_P (reg)
1276 || find_reg_fusage (insn, CLOBBER, reg)))))
1277 return true;
1278
1279 /* There are no REG_INC notes for SP autoinc. */
1280 if (reg == stack_pointer_rtx && INSN_P (insn))
1281 {
1282 subrtx_var_iterator::array_type array;
1283 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1284 {
1285 rtx mem = *iter;
1286 if (mem
1287 && MEM_P (mem)
1288 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1289 {
1290 if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1291 return true;
1292 iter.skip_subrtxes ();
1293 }
1294 }
1295 }
1296
1297 return set_of (reg, insn) != NULL_RTX;
1298 }
1299
1300 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1301 only if none of them are modified between START and END. Return 1 if
1302 X contains a MEM; this routine does use memory aliasing. */
1303
1304 int
1305 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1306 {
1307 const enum rtx_code code = GET_CODE (x);
1308 const char *fmt;
1309 int i, j;
1310 rtx_insn *insn;
1311
1312 if (start == end)
1313 return 0;
1314
1315 switch (code)
1316 {
1317 CASE_CONST_ANY:
1318 case CONST:
1319 case SYMBOL_REF:
1320 case LABEL_REF:
1321 return 0;
1322
1323 case PC:
1324 case CC0:
1325 return 1;
1326
1327 case MEM:
1328 if (modified_between_p (XEXP (x, 0), start, end))
1329 return 1;
1330 if (MEM_READONLY_P (x))
1331 return 0;
1332 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1333 if (memory_modified_in_insn_p (x, insn))
1334 return 1;
1335 return 0;
1336
1337 case REG:
1338 return reg_set_between_p (x, start, end);
1339
1340 default:
1341 break;
1342 }
1343
1344 fmt = GET_RTX_FORMAT (code);
1345 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1346 {
1347 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1348 return 1;
1349
1350 else if (fmt[i] == 'E')
1351 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1352 if (modified_between_p (XVECEXP (x, i, j), start, end))
1353 return 1;
1354 }
1355
1356 return 0;
1357 }
1358
1359 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1360 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1361 does use memory aliasing. */
1362
1363 int
1364 modified_in_p (const_rtx x, const_rtx insn)
1365 {
1366 const enum rtx_code code = GET_CODE (x);
1367 const char *fmt;
1368 int i, j;
1369
1370 switch (code)
1371 {
1372 CASE_CONST_ANY:
1373 case CONST:
1374 case SYMBOL_REF:
1375 case LABEL_REF:
1376 return 0;
1377
1378 case PC:
1379 case CC0:
1380 return 1;
1381
1382 case MEM:
1383 if (modified_in_p (XEXP (x, 0), insn))
1384 return 1;
1385 if (MEM_READONLY_P (x))
1386 return 0;
1387 if (memory_modified_in_insn_p (x, insn))
1388 return 1;
1389 return 0;
1390
1391 case REG:
1392 return reg_set_p (x, insn);
1393
1394 default:
1395 break;
1396 }
1397
1398 fmt = GET_RTX_FORMAT (code);
1399 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1400 {
1401 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1402 return 1;
1403
1404 else if (fmt[i] == 'E')
1405 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1406 if (modified_in_p (XVECEXP (x, i, j), insn))
1407 return 1;
1408 }
1409
1410 return 0;
1411 }
1412
1413 /* Return true if X is a SUBREG and if storing a value to X would
1414 preserve some of its SUBREG_REG. For example, on a normal 32-bit
1415 target, using a SUBREG to store to one half of a DImode REG would
1416 preserve the other half. */
1417
1418 bool
1419 read_modify_subreg_p (const_rtx x)
1420 {
1421 if (GET_CODE (x) != SUBREG)
1422 return false;
1423 poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1424 poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1425 poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1426 /* The inner and outer modes of a subreg must be ordered, so that we
1427 can tell whether they're paradoxical or partial. */
1428 gcc_checking_assert (ordered_p (isize, osize));
1429 return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1430 }
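/* Concretely (illustrative): on a 32-bit target where REGMODE_NATURAL_SIZE
   of DImode is the word size, storing through (subreg:SI (reg:DI 100) 0)
   writes only one word of pseudo 100 and preserves the other, so this
   returns true; a same-size or paradoxical subreg returns false.  */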
1431 \f
1432 /* Helper function for set_of. */
1433 struct set_of_data
1434 {
1435 const_rtx found;
1436 const_rtx pat;
1437 };
1438
1439 static void
1440 set_of_1 (rtx x, const_rtx pat, void *data1)
1441 {
1442 struct set_of_data *const data = (struct set_of_data *) (data1);
1443 if (rtx_equal_p (x, data->pat)
1444 || (GET_CODE (pat) == CLOBBER_HIGH
1445 && REGNO(data->pat) == REGNO(XEXP (pat, 0))
1446 && reg_is_clobbered_by_clobber_high (data->pat, XEXP (pat, 0)))
1447 || (GET_CODE (pat) != CLOBBER_HIGH && !MEM_P (x)
1448 && reg_overlap_mentioned_p (data->pat, x)))
1449 data->found = pat;
1450 }
1451
1452 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1453 (either directly or via STRICT_LOW_PART and similar modifiers). */
1454 const_rtx
1455 set_of (const_rtx pat, const_rtx insn)
1456 {
1457 struct set_of_data data;
1458 data.found = NULL_RTX;
1459 data.pat = pat;
1460 note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1461 return data.found;
1462 }
1463
1464 /* Add all hard registers in X to *PSET. */
1465 void
1466 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1467 {
1468 subrtx_iterator::array_type array;
1469 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1470 {
1471 const_rtx x = *iter;
1472 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1473 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1474 }
1475 }
1476
1477 /* This function, called through note_stores, collects sets and
1478 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1479 by DATA. */
1480 void
1481 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1482 {
1483 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1484 if (REG_P (x) && HARD_REGISTER_P (x))
1485 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1486 }
1487
1488 /* Examine INSN, and compute the set of hard registers written by it.
1489 Store it in *PSET. Should only be called after reload. */
1490 void
1491 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1492 {
1493 rtx link;
1494
1495 CLEAR_HARD_REG_SET (*pset);
1496 note_stores (insn, record_hard_reg_sets, pset);
1497 if (CALL_P (insn) && implicit)
1498 *pset |= call_used_or_fixed_regs;
1499 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1500 if (REG_NOTE_KIND (link) == REG_INC)
1501 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1502 }
1503
1504 /* Like record_hard_reg_sets, but called through note_uses. */
1505 void
1506 record_hard_reg_uses (rtx *px, void *data)
1507 {
1508 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1509 }
1510 \f
1511 /* Given an INSN, return a SET expression if this insn has only a single SET.
1512 It may also have CLOBBERs, USEs, or SETs whose output
1513 will not be used, which we ignore. */
1514
1515 rtx
1516 single_set_2 (const rtx_insn *insn, const_rtx pat)
1517 {
1518 rtx set = NULL;
1519 int set_verified = 1;
1520 int i;
1521
1522 if (GET_CODE (pat) == PARALLEL)
1523 {
1524 for (i = 0; i < XVECLEN (pat, 0); i++)
1525 {
1526 rtx sub = XVECEXP (pat, 0, i);
1527 switch (GET_CODE (sub))
1528 {
1529 case USE:
1530 case CLOBBER:
1531 case CLOBBER_HIGH:
1532 break;
1533
1534 case SET:
1535 /* We can consider insns having multiple sets, where all
1536 but one are dead, as single set insns. In the common case
1537 only a single set is present in the pattern, so we want
1538 to avoid checking for REG_UNUSED notes unless necessary.
1539 
1540 When we reach a set for the first time, we just expect it is
1541 the single set we are looking for, and only when more
1542 sets are found in the insn do we check them. */
1543 if (!set_verified)
1544 {
1545 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1546 && !side_effects_p (set))
1547 set = NULL;
1548 else
1549 set_verified = 1;
1550 }
1551 if (!set)
1552 set = sub, set_verified = 0;
1553 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1554 || side_effects_p (sub))
1555 return NULL_RTX;
1556 break;
1557
1558 default:
1559 return NULL_RTX;
1560 }
1561 }
1562 }
1563 return set;
1564 }
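/* single_set_2 is normally reached through the single_set wrapper in
   rtl.h, which handles the simple non-PARALLEL case inline.  As an
   illustration, a pattern such as

     (parallel [(set (reg:SI 0) (reg:SI 1))
                (clobber (reg:CC 17))])

   still counts as a single set: the CLOBBER is skipped and the SET is
   returned.  */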
1565
1566 /* Given an INSN, return nonzero if it has more than one SET, else return
1567 zero. */
1568
1569 int
1570 multiple_sets (const_rtx insn)
1571 {
1572 int found;
1573 int i;
1574
1575 /* INSN must be an insn. */
1576 if (! INSN_P (insn))
1577 return 0;
1578
1579 /* Only a PARALLEL can have multiple SETs. */
1580 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1581 {
1582 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1583 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1584 {
1585 /* If we have already found a SET, then return now. */
1586 if (found)
1587 return 1;
1588 else
1589 found = 1;
1590 }
1591 }
1592
1593 /* Either zero or one SET. */
1594 return 0;
1595 }
1596 \f
1597 /* Return nonzero if the destination of SET equals the source
1598 and there are no side effects. */
1599
1600 int
1601 set_noop_p (const_rtx set)
1602 {
1603 rtx src = SET_SRC (set);
1604 rtx dst = SET_DEST (set);
1605
1606 if (dst == pc_rtx && src == pc_rtx)
1607 return 1;
1608
1609 if (MEM_P (dst) && MEM_P (src))
1610 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1611
1612 if (GET_CODE (dst) == ZERO_EXTRACT)
1613 return rtx_equal_p (XEXP (dst, 0), src)
1614 && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1615 && !side_effects_p (src);
1616
1617 if (GET_CODE (dst) == STRICT_LOW_PART)
1618 dst = XEXP (dst, 0);
1619
1620 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1621 {
1622 if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1623 return 0;
1624 src = SUBREG_REG (src);
1625 dst = SUBREG_REG (dst);
1626 }
1627
1628 /* It is a NOOP if the destination overlaps with the selected src vector
1629 elements. */
1630 if (GET_CODE (src) == VEC_SELECT
1631 && REG_P (XEXP (src, 0)) && REG_P (dst)
1632 && HARD_REGISTER_P (XEXP (src, 0))
1633 && HARD_REGISTER_P (dst))
1634 {
1635 int i;
1636 rtx par = XEXP (src, 1);
1637 rtx src0 = XEXP (src, 0);
1638 poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
1639 poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1640
1641 for (i = 1; i < XVECLEN (par, 0); i++)
1642 if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
1643 return 0;
1644 return
1645 REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1646 && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1647 offset, GET_MODE (dst)) == (int) REGNO (dst);
1648 }
1649
1650 return (REG_P (src) && REG_P (dst)
1651 && REGNO (src) == REGNO (dst));
1652 }
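/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op, as is a copy
   between two identical, side-effect-free MEMs, whereas
   (set (reg:SI 1) (reg:SI 2)) is not.  */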
1653 \f
1654 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1655 value to itself. */
1656
1657 int
1658 noop_move_p (const rtx_insn *insn)
1659 {
1660 rtx pat = PATTERN (insn);
1661
1662 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1663 return 1;
1664
1665 /* Insns carrying these notes are useful later on. */
1666 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1667 return 0;
1668
1669 /* Check the code to be executed for COND_EXEC. */
1670 if (GET_CODE (pat) == COND_EXEC)
1671 pat = COND_EXEC_CODE (pat);
1672
1673 if (GET_CODE (pat) == SET && set_noop_p (pat))
1674 return 1;
1675
1676 if (GET_CODE (pat) == PARALLEL)
1677 {
1678 int i;
1679 /* If nothing but SETs of registers to themselves,
1680 this insn can also be deleted. */
1681 for (i = 0; i < XVECLEN (pat, 0); i++)
1682 {
1683 rtx tem = XVECEXP (pat, 0, i);
1684
1685 if (GET_CODE (tem) == USE
1686 || GET_CODE (tem) == CLOBBER
1687 || GET_CODE (tem) == CLOBBER_HIGH)
1688 continue;
1689
1690 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1691 return 0;
1692 }
1693
1694 return 1;
1695 }
1696 return 0;
1697 }
1698 \f
1699
1700 /* Return nonzero if a register in the range [REGNO, ENDREGNO)
1701 appears either explicitly or implicitly in X
1702 other than being stored into.
1703
1704 References contained within the substructure at LOC do not count.
1705 LOC may be zero, meaning don't ignore anything. */
1706
1707 bool
1708 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1709 rtx *loc)
1710 {
1711 int i;
1712 unsigned int x_regno;
1713 RTX_CODE code;
1714 const char *fmt;
1715
1716 repeat:
1717 /* The contents of a REG_NONNEG note are always zero, so we must come here
1718 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1719 if (x == 0)
1720 return false;
1721
1722 code = GET_CODE (x);
1723
1724 switch (code)
1725 {
1726 case REG:
1727 x_regno = REGNO (x);
1728
1729 /* If we are modifying the stack, frame, or argument pointer, it will
1730 clobber a virtual register. In fact, we could be more precise,
1731 but it isn't worth it. */
1732 if ((x_regno == STACK_POINTER_REGNUM
1733 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1734 && x_regno == ARG_POINTER_REGNUM)
1735 || x_regno == FRAME_POINTER_REGNUM)
1736 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1737 return true;
1738
1739 return endregno > x_regno && regno < END_REGNO (x);
1740
1741 case SUBREG:
1742 /* If this is a SUBREG of a hard reg, we can see exactly which
1743 registers are being modified. Otherwise, handle normally. */
1744 if (REG_P (SUBREG_REG (x))
1745 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1746 {
1747 unsigned int inner_regno = subreg_regno (x);
1748 unsigned int inner_endregno
1749 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1750 ? subreg_nregs (x) : 1);
1751
1752 return endregno > inner_regno && regno < inner_endregno;
1753 }
1754 break;
1755
1756 case CLOBBER:
1757 case SET:
1758 if (&SET_DEST (x) != loc
1759 /* Note setting a SUBREG counts as referring to the REG it is in for
1760 a pseudo but not for hard registers since we can
1761 treat each word individually. */
1762 && ((GET_CODE (SET_DEST (x)) == SUBREG
1763 && loc != &SUBREG_REG (SET_DEST (x))
1764 && REG_P (SUBREG_REG (SET_DEST (x)))
1765 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1766 && refers_to_regno_p (regno, endregno,
1767 SUBREG_REG (SET_DEST (x)), loc))
1768 || (!REG_P (SET_DEST (x))
1769 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1770 return true;
1771
1772 if (code == CLOBBER || loc == &SET_SRC (x))
1773 return false;
1774 x = SET_SRC (x);
1775 goto repeat;
1776
1777 default:
1778 break;
1779 }
1780
1781 /* X does not match, so try its subexpressions. */
1782
1783 fmt = GET_RTX_FORMAT (code);
1784 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1785 {
1786 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1787 {
1788 if (i == 0)
1789 {
1790 x = XEXP (x, 0);
1791 goto repeat;
1792 }
1793 else
1794 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1795 return true;
1796 }
1797 else if (fmt[i] == 'E')
1798 {
1799 int j;
1800 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1801 if (loc != &XVECEXP (x, i, j)
1802 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1803 return true;
1804 }
1805 }
1806 return false;
1807 }
1808
1809 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1810 we check if any register number in X conflicts with the relevant register
1811 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1812 contains a MEM (we don't bother checking for memory addresses that can't
1813 conflict because we expect this to be a rare case). */
1814
1815 int
1816 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1817 {
1818 unsigned int regno, endregno;
1819
1820 /* If either argument is a constant, then modifying X cannot
1821 affect IN. Here we look at IN; we can profitably combine
1822 CONSTANT_P (x) with the switch statement below. */
1823 if (CONSTANT_P (in))
1824 return 0;
1825
1826 recurse:
1827 switch (GET_CODE (x))
1828 {
1829 case CLOBBER:
1830 case STRICT_LOW_PART:
1831 case ZERO_EXTRACT:
1832 case SIGN_EXTRACT:
1833 /* Overly conservative. */
1834 x = XEXP (x, 0);
1835 goto recurse;
1836
1837 case SUBREG:
1838 regno = REGNO (SUBREG_REG (x));
1839 if (regno < FIRST_PSEUDO_REGISTER)
1840 regno = subreg_regno (x);
1841 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1842 ? subreg_nregs (x) : 1);
1843 goto do_reg;
1844
1845 case REG:
1846 regno = REGNO (x);
1847 endregno = END_REGNO (x);
1848 do_reg:
1849 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1850
1851 case MEM:
1852 {
1853 const char *fmt;
1854 int i;
1855
1856 if (MEM_P (in))
1857 return 1;
1858
1859 fmt = GET_RTX_FORMAT (GET_CODE (in));
1860 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1861 if (fmt[i] == 'e')
1862 {
1863 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1864 return 1;
1865 }
1866 else if (fmt[i] == 'E')
1867 {
1868 int j;
1869 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1870 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1871 return 1;
1872 }
1873
1874 return 0;
1875 }
1876
1877 case SCRATCH:
1878 case PC:
1879 case CC0:
1880 return reg_mentioned_p (x, in);
1881
1882 case PARALLEL:
1883 {
1884 int i;
1885
1886 /* If any register in here refers to it we return true. */
1887 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1888 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1889 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1890 return 1;
1891 return 0;
1892 }
1893
1894 default:
1895 gcc_assert (CONSTANT_P (x));
1896 return 0;
1897 }
1898 }
1899 \f
1900 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1901 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1902 ignored by note_stores, but passed to FUN.
1903
1904 FUN receives three arguments:
1905 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1906 2. the SET or CLOBBER rtx that does the store,
1907 3. the pointer DATA provided to note_stores.
1908
1909 If the item being stored in or clobbered is a SUBREG of a hard register,
1910 the SUBREG will be passed. */
1911
1912 void
1913 note_pattern_stores (const_rtx x,
1914 void (*fun) (rtx, const_rtx, void *), void *data)
1915 {
1916 int i;
1917
1918 if (GET_CODE (x) == COND_EXEC)
1919 x = COND_EXEC_CODE (x);
1920
1921 if (GET_CODE (x) == SET
1922 || GET_CODE (x) == CLOBBER
1923 || GET_CODE (x) == CLOBBER_HIGH)
1924 {
1925 rtx dest = SET_DEST (x);
1926
1927 while ((GET_CODE (dest) == SUBREG
1928 && (!REG_P (SUBREG_REG (dest))
1929 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1930 || GET_CODE (dest) == ZERO_EXTRACT
1931 || GET_CODE (dest) == STRICT_LOW_PART)
1932 dest = XEXP (dest, 0);
1933
1934 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1935 each of whose first operand is a register. */
1936 if (GET_CODE (dest) == PARALLEL)
1937 {
1938 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1939 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1940 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1941 }
1942 else
1943 (*fun) (dest, x, data);
1944 }
1945
1946 else if (GET_CODE (x) == PARALLEL)
1947 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1948 note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1949 }
1950
1951 /* Same, but for an instruction. If the instruction is a call, include
1952 any CLOBBERs in its CALL_INSN_FUNCTION_USAGE. */
1953
1954 void
1955 note_stores (const rtx_insn *insn,
1956 void (*fun) (rtx, const_rtx, void *), void *data)
1957 {
1958 if (CALL_P (insn))
1959 for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1960 link; link = XEXP (link, 1))
1961 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1962 note_pattern_stores (XEXP (link, 0), fun, data);
1963 note_pattern_stores (PATTERN (insn), fun, data);
1964 }
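/* An illustrative callback for note_stores (hypothetical, in the same
   spirit as record_hard_reg_sets above):

     static void
     count_reg_stores (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x))
         ++*(int *) data;
     }

     int n = 0;
     note_stores (insn, count_reg_stores, &n);  */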
1965 \f
1966 /* Like note_stores, but call FUN for each expression that is being
1967 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1968 FUN for each expression, not any interior subexpressions. FUN receives a
1969 pointer to the expression and the DATA passed to this function.
1970
1971 Note that this is not quite the same test as that done in reg_referenced_p
1972 since that considers something as being referenced if it is being
1973 partially set, while we do not. */
1974
1975 void
1976 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1977 {
1978 rtx body = *pbody;
1979 int i;
1980
1981 switch (GET_CODE (body))
1982 {
1983 case COND_EXEC:
1984 (*fun) (&COND_EXEC_TEST (body), data);
1985 note_uses (&COND_EXEC_CODE (body), fun, data);
1986 return;
1987
1988 case PARALLEL:
1989 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1990 note_uses (&XVECEXP (body, 0, i), fun, data);
1991 return;
1992
1993 case SEQUENCE:
1994 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1995 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1996 return;
1997
1998 case USE:
1999 (*fun) (&XEXP (body, 0), data);
2000 return;
2001
2002 case ASM_OPERANDS:
2003 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2004 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2005 return;
2006
2007 case TRAP_IF:
2008 (*fun) (&TRAP_CONDITION (body), data);
2009 return;
2010
2011 case PREFETCH:
2012 (*fun) (&XEXP (body, 0), data);
2013 return;
2014
2015 case UNSPEC:
2016 case UNSPEC_VOLATILE:
2017 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2018 (*fun) (&XVECEXP (body, 0, i), data);
2019 return;
2020
2021 case CLOBBER:
2022 if (MEM_P (XEXP (body, 0)))
2023 (*fun) (&XEXP (XEXP (body, 0), 0), data);
2024 return;
2025
2026 case SET:
2027 {
2028 rtx dest = SET_DEST (body);
2029
2030 /* For sets we replace everything in source plus registers in memory
2031 expression in store and operands of a ZERO_EXTRACT. */
2032 (*fun) (&SET_SRC (body), data);
2033
2034 if (GET_CODE (dest) == ZERO_EXTRACT)
2035 {
2036 (*fun) (&XEXP (dest, 1), data);
2037 (*fun) (&XEXP (dest, 2), data);
2038 }
2039
2040 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2041 dest = XEXP (dest, 0);
2042
2043 if (MEM_P (dest))
2044 (*fun) (&XEXP (dest, 0), data);
2045 }
2046 return;
2047
2048 default:
2049 /* All the other possibilities never store. */
2050 (*fun) (pbody, data);
2051 return;
2052 }
2053 }
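/* Usage sketch (illustrative, not part of the original source): because FUN
   receives a pointer to each used expression, callers may rewrite uses in
   place, but they must walk inside each expression themselves if they care
   about subexpressions.  The helper name below is hypothetical.

     static void
     note_sp_use (rtx *loc, void *data)
     {
       if (reg_mentioned_p (stack_pointer_rtx, *loc))
         *(bool *) data = true;
     }

     bool uses_sp = false;
     note_uses (&PATTERN (insn), note_sp_use, &uses_sp);  */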
2054 \f
2055 /* Return nonzero if X's old contents don't survive after INSN.
2056 This will be true if X is (cc0) or if X is a register and
2057 either X dies in INSN or INSN entirely sets X.
2058
2059 "Entirely set" means set directly and not through a SUBREG or
2060 ZERO_EXTRACT, so no trace of the old contents remains.
2061 Likewise, REG_INC does not count.
2062
2063 REG may be a hard or pseudo reg. Renumbering is not taken into account,
2064 but for this use that makes no difference, since regs don't overlap
2065 during their lifetimes. Therefore, this function may be used
2066 at any time after deaths have been computed.
2067
2068 If REG is a hard reg that occupies multiple machine registers, this
2069 function will only return 1 if each of those registers will be replaced
2070 by INSN. */
2071
2072 int
2073 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2074 {
2075 unsigned int regno, end_regno;
2076 unsigned int i;
2077
2078 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
2079 if (GET_CODE (x) == CC0)
2080 return 1;
2081
2082 gcc_assert (REG_P (x));
2083
2084 regno = REGNO (x);
2085 end_regno = END_REGNO (x);
2086 for (i = regno; i < end_regno; i++)
2087 if (! dead_or_set_regno_p (insn, i))
2088 return 0;
2089
2090 return 1;
2091 }
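/* Usage sketch (illustrative, not part of the original source): before
   reusing REG as a temporary in code inserted after INSN, a transformation
   could check that REG's old value is no longer needed, assuming death
   notes are up to date:

     if (REG_P (reg) && dead_or_set_p (insn, reg))
       reuse_reg_after (insn, reg);

   reuse_reg_after is a hypothetical caller-side helper.  */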
2092
2093 /* Return TRUE iff DEST is a register or subreg of a register, is a
2094 complete rather than read-modify-write destination, and contains
2095 register TEST_REGNO. */
2096
2097 static bool
2098 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2099 {
2100 unsigned int regno, endregno;
2101
2102 if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2103 dest = SUBREG_REG (dest);
2104
2105 if (!REG_P (dest))
2106 return false;
2107
2108 regno = REGNO (dest);
2109 endregno = END_REGNO (dest);
2110 return (test_regno >= regno && test_regno < endregno);
2111 }
2112
2113 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2114 any member matches the covers_regno_no_parallel_p criteria. */
2115
2116 static bool
2117 covers_regno_p (const_rtx dest, unsigned int test_regno)
2118 {
2119 if (GET_CODE (dest) == PARALLEL)
2120 {
2121 /* Some targets place small structures in registers for return
2122 values of functions, and those registers are wrapped in
2123 PARALLELs that we may see as the destination of a SET. */
2124 int i;
2125
2126 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2127 {
2128 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2129 if (inner != NULL_RTX
2130 && covers_regno_no_parallel_p (inner, test_regno))
2131 return true;
2132 }
2133
2134 return false;
2135 }
2136 else
2137 return covers_regno_no_parallel_p (dest, test_regno);
2138 }
2139
2140 /* Utility function for dead_or_set_p to check an individual register. */
2141
2142 int
2143 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2144 {
2145 const_rtx pattern;
2146
2147 /* See if there is a death note for something that includes TEST_REGNO. */
2148 if (find_regno_note (insn, REG_DEAD, test_regno))
2149 return 1;
2150
2151 if (CALL_P (insn)
2152 && find_regno_fusage (insn, CLOBBER, test_regno))
2153 return 1;
2154
2155 pattern = PATTERN (insn);
2156
2157 /* If a COND_EXEC is not executed, the value survives. */
2158 if (GET_CODE (pattern) == COND_EXEC)
2159 return 0;
2160
2161 if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2162 return covers_regno_p (SET_DEST (pattern), test_regno);
2163 else if (GET_CODE (pattern) == PARALLEL)
2164 {
2165 int i;
2166
2167 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2168 {
2169 rtx body = XVECEXP (pattern, 0, i);
2170
2171 if (GET_CODE (body) == COND_EXEC)
2172 body = COND_EXEC_CODE (body);
2173
2174 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2175 && covers_regno_p (SET_DEST (body), test_regno))
2176 return 1;
2177 }
2178 }
2179
2180 return 0;
2181 }
2182
2183 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2184 If DATUM is nonzero, look for one whose datum is DATUM. */
2185
2186 rtx
2187 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2188 {
2189 rtx link;
2190
2191 gcc_checking_assert (insn);
2192
2193 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2194 if (! INSN_P (insn))
2195 return 0;
2196 if (datum == 0)
2197 {
2198 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2199 if (REG_NOTE_KIND (link) == kind)
2200 return link;
2201 return 0;
2202 }
2203
2204 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2205 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2206 return link;
2207 return 0;
2208 }
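/* Usage sketch (illustrative, not part of the original source): the return
   value is the note itself (an EXPR_LIST or INSN_LIST node), so its datum
   is XEXP (note, 0).  For example, to read the value recorded by a
   REG_EQUAL note, if any:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     rtx equal_value = note ? XEXP (note, 0) : NULL_RTX;

   equal_value is a hypothetical local.  */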
2209
2210 /* Return the reg-note of kind KIND in insn INSN which applies to register
2211 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2212 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2213 it might be the case that the note overlaps REGNO. */
2214
2215 rtx
2216 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2217 {
2218 rtx link;
2219
2220 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2221 if (! INSN_P (insn))
2222 return 0;
2223
2224 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2225 if (REG_NOTE_KIND (link) == kind
2226 /* Verify that it is a register, so that scratch and MEM won't cause a
2227 problem here. */
2228 && REG_P (XEXP (link, 0))
2229 && REGNO (XEXP (link, 0)) <= regno
2230 && END_REGNO (XEXP (link, 0)) > regno)
2231 return link;
2232 return 0;
2233 }
2234
2235 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2236 has such a note. */
2237
2238 rtx
2239 find_reg_equal_equiv_note (const_rtx insn)
2240 {
2241 rtx link;
2242
2243 if (!INSN_P (insn))
2244 return 0;
2245
2246 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2247 if (REG_NOTE_KIND (link) == REG_EQUAL
2248 || REG_NOTE_KIND (link) == REG_EQUIV)
2249 {
2250 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2251 insns that have multiple sets. Checking single_set to
2252 make sure of this is not the proper check, as explained
2253 in the comment in set_unique_reg_note.
2254
2255 This should be changed into an assert. */
2256 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2257 return 0;
2258 return link;
2259 }
2260 return NULL;
2261 }
2262
2263 /* Check whether INSN is a single_set whose source is known to be
2264 equivalent to a constant. Return that constant if so, otherwise
2265 return null. */
2266
2267 rtx
2268 find_constant_src (const rtx_insn *insn)
2269 {
2270 rtx note, set, x;
2271
2272 set = single_set (insn);
2273 if (set)
2274 {
2275 x = avoid_constant_pool_reference (SET_SRC (set));
2276 if (CONSTANT_P (x))
2277 return x;
2278 }
2279
2280 note = find_reg_equal_equiv_note (insn);
2281 if (note && CONSTANT_P (XEXP (note, 0)))
2282 return XEXP (note, 0);
2283
2284 return NULL_RTX;
2285 }
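/* Usage sketch (illustrative, not part of the original source): a
   propagation pass might use this to substitute a constant for a use of
   the register set by DEF_INSN, validating the change through recog:

     rtx cst = find_constant_src (def_insn);
     if (cst != NULL_RTX)
       validate_change (use_insn, use_loc, cst, false);

   def_insn, use_insn and use_loc are hypothetical.  */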
2286
2287 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2288 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2289
2290 int
2291 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2292 {
2293 /* If it's not a CALL_INSN, it can't possibly have a
2294 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2295 if (!CALL_P (insn))
2296 return 0;
2297
2298 gcc_assert (datum);
2299
2300 if (!REG_P (datum))
2301 {
2302 rtx link;
2303
2304 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2305 link;
2306 link = XEXP (link, 1))
2307 if (GET_CODE (XEXP (link, 0)) == code
2308 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2309 return 1;
2310 }
2311 else
2312 {
2313 unsigned int regno = REGNO (datum);
2314
2315 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2316 to pseudo registers, so don't bother checking. */
2317
2318 if (regno < FIRST_PSEUDO_REGISTER)
2319 {
2320 unsigned int end_regno = END_REGNO (datum);
2321 unsigned int i;
2322
2323 for (i = regno; i < end_regno; i++)
2324 if (find_regno_fusage (insn, code, i))
2325 return 1;
2326 }
2327 }
2328
2329 return 0;
2330 }
2331
2332 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2333 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2334
2335 int
2336 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2337 {
2338 rtx link;
2339
2340 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2341 to pseudo registers, so don't bother checking. */
2342
2343 if (regno >= FIRST_PSEUDO_REGISTER
2344 || !CALL_P (insn) )
2345 return 0;
2346
2347 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2348 {
2349 rtx op, reg;
2350
2351 if (GET_CODE (op = XEXP (link, 0)) == code
2352 && REG_P (reg = XEXP (op, 0))
2353 && REGNO (reg) <= regno
2354 && END_REGNO (reg) > regno)
2355 return 1;
2356 }
2357
2358 return 0;
2359 }
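/* Usage sketch (illustrative, not part of the original source): register
   uses and clobbers that are specific to a call are recorded in
   CALL_INSN_FUNCTION_USAGE rather than in the call's pattern, so a check
   for them goes through these helpers:

     bool call_uses_it = (CALL_P (insn)
                          && find_reg_fusage (insn, USE, hard_reg));

   hard_reg is a hypothetical (reg ...) for a hard register.  */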
2360
2361 \f
2362 /* Return true if KIND is an integer REG_NOTE. */
2363
2364 static bool
2365 int_reg_note_p (enum reg_note kind)
2366 {
2367 return kind == REG_BR_PROB;
2368 }
2369
2370 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2371 stored as the pointer to the next register note. */
2372
2373 rtx
2374 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2375 {
2376 rtx note;
2377
2378 gcc_checking_assert (!int_reg_note_p (kind));
2379 switch (kind)
2380 {
2381 case REG_CC_SETTER:
2382 case REG_CC_USER:
2383 case REG_LABEL_TARGET:
2384 case REG_LABEL_OPERAND:
2385 case REG_TM:
2386 /* These types of register notes use an INSN_LIST rather than an
2387 EXPR_LIST, so that copying is done right and dumps look
2388 better. */
2389 note = alloc_INSN_LIST (datum, list);
2390 PUT_REG_NOTE_KIND (note, kind);
2391 break;
2392
2393 default:
2394 note = alloc_EXPR_LIST (kind, datum, list);
2395 break;
2396 }
2397
2398 return note;
2399 }
2400
2401 /* Add register note with kind KIND and datum DATUM to INSN. */
2402
2403 void
2404 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2405 {
2406 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2407 }
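/* Usage sketch (illustrative, not part of the original source): recording
   that the value stored by INSN is known to equal SRC_EXPR.  When an
   existing REG_EQUAL/REG_EQUIV note might already be present, the
   emit-rtl.c helper set_unique_reg_note is normally used instead:

     add_reg_note (insn, REG_EQUAL, src_expr);

   src_expr is a hypothetical RTL expression.  */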
2408
2409 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2410
2411 void
2412 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2413 {
2414 gcc_checking_assert (int_reg_note_p (kind));
2415 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2416 datum, REG_NOTES (insn));
2417 }
2418
2419 /* Add a REG_ARGS_SIZE note to INSN with value VALUE. */
2420
2421 void
2422 add_args_size_note (rtx_insn *insn, poly_int64 value)
2423 {
2424 gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2425 add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2426 }
2427
2428 /* Add a register note like NOTE to INSN. */
2429
2430 void
2431 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2432 {
2433 if (GET_CODE (note) == INT_LIST)
2434 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2435 else
2436 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2437 }
2438
2439 /* Duplicate NOTE and return the copy. */
2440 rtx
2441 duplicate_reg_note (rtx note)
2442 {
2443 reg_note kind = REG_NOTE_KIND (note);
2444
2445 if (GET_CODE (note) == INT_LIST)
2446 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2447 else if (GET_CODE (note) == EXPR_LIST)
2448 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2449 else
2450 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2451 }
2452
2453 /* Remove register note NOTE from the REG_NOTES of INSN. */
2454
2455 void
2456 remove_note (rtx_insn *insn, const_rtx note)
2457 {
2458 rtx link;
2459
2460 if (note == NULL_RTX)
2461 return;
2462
2463 if (REG_NOTES (insn) == note)
2464 REG_NOTES (insn) = XEXP (note, 1);
2465 else
2466 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2467 if (XEXP (link, 1) == note)
2468 {
2469 XEXP (link, 1) = XEXP (note, 1);
2470 break;
2471 }
2472
2473 switch (REG_NOTE_KIND (note))
2474 {
2475 case REG_EQUAL:
2476 case REG_EQUIV:
2477 df_notes_rescan (insn);
2478 break;
2479 default:
2480 break;
2481 }
2482 }
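/* Usage sketch (illustrative, not part of the original source): when a
   transformation invalidates the equivalence recorded on INSN, the note
   should be removed through this function rather than by unlinking it by
   hand, so that REG_EQUAL/REG_EQUIV removal triggers a DF rescan:

     rtx note = find_reg_equal_equiv_note (insn);
     if (note)
       remove_note (insn, note);  */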
2483
2484 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2485 Return true if any note has been removed. */
2486
2487 bool
2488 remove_reg_equal_equiv_notes (rtx_insn *insn)
2489 {
2490 rtx *loc;
2491 bool ret = false;
2492
2493 loc = &REG_NOTES (insn);
2494 while (*loc)
2495 {
2496 enum reg_note kind = REG_NOTE_KIND (*loc);
2497 if (kind == REG_EQUAL || kind == REG_EQUIV)
2498 {
2499 *loc = XEXP (*loc, 1);
2500 ret = true;
2501 }
2502 else
2503 loc = &XEXP (*loc, 1);
2504 }
2505 return ret;
2506 }
2507
2508 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2509
2510 void
2511 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2512 {
2513 df_ref eq_use;
2514
2515 if (!df)
2516 return;
2517
2518 /* This loop is a little tricky. We cannot just go down the chain because
2519 it is being modified by some actions in the loop. So we just iterate
2520 over the head. We plan to drain the list anyway. */
2521 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2522 {
2523 rtx_insn *insn = DF_REF_INSN (eq_use);
2524 rtx note = find_reg_equal_equiv_note (insn);
2525
2526 /* This assert is generally triggered when someone deletes a REG_EQUAL
2527 or REG_EQUIV note by hacking the list manually rather than calling
2528 remove_note. */
2529 gcc_assert (note);
2530
2531 remove_note (insn, note);
2532 }
2533 }
2534
2535 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2536 return true if it is found.  A simple equality test is used to determine if
2537 NODE matches. */
2538
2539 bool
2540 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2541 {
2542 const_rtx x;
2543
2544 for (x = listp; x; x = XEXP (x, 1))
2545 if (node == XEXP (x, 0))
2546 return true;
2547
2548 return false;
2549 }
2550
2551 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2552 remove that entry from the list if it is found.
2553
2554 A simple equality test is used to determine if NODE matches. */
2555
2556 void
2557 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2558 {
2559 rtx_expr_list *temp = *listp;
2560 rtx_expr_list *prev = NULL;
2561
2562 while (temp)
2563 {
2564 if (node == temp->element ())
2565 {
2566 /* Splice the node out of the list. */
2567 if (prev)
2568 XEXP (prev, 1) = temp->next ();
2569 else
2570 *listp = temp->next ();
2571
2572 return;
2573 }
2574
2575 prev = temp;
2576 temp = temp->next ();
2577 }
2578 }
2579
2580 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2581 remove that entry from the list if it is found.
2582
2583 A simple equality test is used to determine if NODE matches. */
2584
2585 void
2586 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2587 {
2588 rtx_insn_list *temp = *listp;
2589 rtx_insn_list *prev = NULL;
2590
2591 while (temp)
2592 {
2593 if (node == temp->insn ())
2594 {
2595 /* Splice the node out of the list. */
2596 if (prev)
2597 XEXP (prev, 1) = temp->next ();
2598 else
2599 *listp = temp->next ();
2600
2601 return;
2602 }
2603
2604 prev = temp;
2605 temp = temp->next ();
2606 }
2607 }
2608 \f
2609 /* Nonzero if X contains any volatile instructions.  These are instructions
2610 which may cause unpredictable machine state, and thus no
2611 instructions or register uses should be moved or combined across them.
2612 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2613
2614 int
2615 volatile_insn_p (const_rtx x)
2616 {
2617 const RTX_CODE code = GET_CODE (x);
2618 switch (code)
2619 {
2620 case LABEL_REF:
2621 case SYMBOL_REF:
2622 case CONST:
2623 CASE_CONST_ANY:
2624 case CC0:
2625 case PC:
2626 case REG:
2627 case SCRATCH:
2628 case CLOBBER:
2629 case ADDR_VEC:
2630 case ADDR_DIFF_VEC:
2631 case CALL:
2632 case MEM:
2633 return 0;
2634
2635 case UNSPEC_VOLATILE:
2636 return 1;
2637
2638 case ASM_INPUT:
2639 case ASM_OPERANDS:
2640 if (MEM_VOLATILE_P (x))
2641 return 1;
2642
2643 default:
2644 break;
2645 }
2646
2647 /* Recursively scan the operands of this expression. */
2648
2649 {
2650 const char *const fmt = GET_RTX_FORMAT (code);
2651 int i;
2652
2653 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2654 {
2655 if (fmt[i] == 'e')
2656 {
2657 if (volatile_insn_p (XEXP (x, i)))
2658 return 1;
2659 }
2660 else if (fmt[i] == 'E')
2661 {
2662 int j;
2663 for (j = 0; j < XVECLEN (x, i); j++)
2664 if (volatile_insn_p (XVECEXP (x, i, j)))
2665 return 1;
2666 }
2667 }
2668 }
2669 return 0;
2670 }
2671
2672 /* Nonzero if X contains any volatile memory references,
2673 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2674
2675 int
2676 volatile_refs_p (const_rtx x)
2677 {
2678 const RTX_CODE code = GET_CODE (x);
2679 switch (code)
2680 {
2681 case LABEL_REF:
2682 case SYMBOL_REF:
2683 case CONST:
2684 CASE_CONST_ANY:
2685 case CC0:
2686 case PC:
2687 case REG:
2688 case SCRATCH:
2689 case CLOBBER:
2690 case ADDR_VEC:
2691 case ADDR_DIFF_VEC:
2692 return 0;
2693
2694 case UNSPEC_VOLATILE:
2695 return 1;
2696
2697 case MEM:
2698 case ASM_INPUT:
2699 case ASM_OPERANDS:
2700 if (MEM_VOLATILE_P (x))
2701 return 1;
2702
2703 default:
2704 break;
2705 }
2706
2707 /* Recursively scan the operands of this expression. */
2708
2709 {
2710 const char *const fmt = GET_RTX_FORMAT (code);
2711 int i;
2712
2713 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2714 {
2715 if (fmt[i] == 'e')
2716 {
2717 if (volatile_refs_p (XEXP (x, i)))
2718 return 1;
2719 }
2720 else if (fmt[i] == 'E')
2721 {
2722 int j;
2723 for (j = 0; j < XVECLEN (x, i); j++)
2724 if (volatile_refs_p (XVECEXP (x, i, j)))
2725 return 1;
2726 }
2727 }
2728 }
2729 return 0;
2730 }
2731
2732 /* Similar to above, except that it also rejects register pre- and post-
2733 increment, decrement and modify side effects, calls, and non-VOID CLOBBERs.  */
2734
2735 int
2736 side_effects_p (const_rtx x)
2737 {
2738 const RTX_CODE code = GET_CODE (x);
2739 switch (code)
2740 {
2741 case LABEL_REF:
2742 case SYMBOL_REF:
2743 case CONST:
2744 CASE_CONST_ANY:
2745 case CC0:
2746 case PC:
2747 case REG:
2748 case SCRATCH:
2749 case ADDR_VEC:
2750 case ADDR_DIFF_VEC:
2751 case VAR_LOCATION:
2752 return 0;
2753
2754 case CLOBBER:
2755 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2756 when some combination can't be done. If we see one, don't think
2757 that we can simplify the expression. */
2758 return (GET_MODE (x) != VOIDmode);
2759
2760 case PRE_INC:
2761 case PRE_DEC:
2762 case POST_INC:
2763 case POST_DEC:
2764 case PRE_MODIFY:
2765 case POST_MODIFY:
2766 case CALL:
2767 case UNSPEC_VOLATILE:
2768 return 1;
2769
2770 case MEM:
2771 case ASM_INPUT:
2772 case ASM_OPERANDS:
2773 if (MEM_VOLATILE_P (x))
2774 return 1;
2775
2776 default:
2777 break;
2778 }
2779
2780 /* Recursively scan the operands of this expression. */
2781
2782 {
2783 const char *fmt = GET_RTX_FORMAT (code);
2784 int i;
2785
2786 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2787 {
2788 if (fmt[i] == 'e')
2789 {
2790 if (side_effects_p (XEXP (x, i)))
2791 return 1;
2792 }
2793 else if (fmt[i] == 'E')
2794 {
2795 int j;
2796 for (j = 0; j < XVECLEN (x, i); j++)
2797 if (side_effects_p (XVECEXP (x, i, j)))
2798 return 1;
2799 }
2800 }
2801 }
2802 return 0;
2803 }
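/* Illustrative note (not part of the original source): the three predicates
   above form a rough hierarchy.  volatile_insn_p is the narrowest (volatile
   asms and UNSPEC_VOLATILEs), volatile_refs_p additionally reports volatile
   MEMs, and side_effects_p additionally reports autoincrements, calls and
   non-VOID CLOBBERs.  A simplified dead-code guard (ignoring liveness of
   the destination) might look like:

     if (!side_effects_p (PATTERN (insn)) && !may_trap_p (PATTERN (insn)))
       delete_insn (insn);  */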
2804 \f
2805 /* Return nonzero if evaluating rtx X might cause a trap.
2806 FLAGS controls how to consider MEMs.  A nonzero value means the context
2807 of the access may have changed from the original, such that the
2808 address may have become invalid. */
2809
2810 int
2811 may_trap_p_1 (const_rtx x, unsigned flags)
2812 {
2813 int i;
2814 enum rtx_code code;
2815 const char *fmt;
2816
2817 /* We make no distinction currently, but this function is part of
2818 the internal target-hooks ABI so we keep the parameter as
2819 "unsigned flags". */
2820 bool code_changed = flags != 0;
2821
2822 if (x == 0)
2823 return 0;
2824 code = GET_CODE (x);
2825 switch (code)
2826 {
2827 /* Handle these cases quickly. */
2828 CASE_CONST_ANY:
2829 case SYMBOL_REF:
2830 case LABEL_REF:
2831 case CONST:
2832 case PC:
2833 case CC0:
2834 case REG:
2835 case SCRATCH:
2836 return 0;
2837
2838 case UNSPEC:
2839 return targetm.unspec_may_trap_p (x, flags);
2840
2841 case UNSPEC_VOLATILE:
2842 case ASM_INPUT:
2843 case TRAP_IF:
2844 return 1;
2845
2846 case ASM_OPERANDS:
2847 return MEM_VOLATILE_P (x);
2848
2849 /* Memory ref can trap unless it's a static var or a stack slot. */
2850 case MEM:
2851 /* Recognize specific pattern of stack checking probes. */
2852 if (flag_stack_check
2853 && MEM_VOLATILE_P (x)
2854 && XEXP (x, 0) == stack_pointer_rtx)
2855 return 1;
2856 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2857 reference; moving it out of context such as when moving code
2858 when optimizing, might cause its address to become invalid. */
2859 code_changed
2860 || !MEM_NOTRAP_P (x))
2861 {
2862 poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2863 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2864 GET_MODE (x), code_changed);
2865 }
2866
2867 return 0;
2868
2869 /* Division by a non-constant might trap. */
2870 case DIV:
2871 case MOD:
2872 case UDIV:
2873 case UMOD:
2874 if (HONOR_SNANS (x))
2875 return 1;
2876 if (FLOAT_MODE_P (GET_MODE (x)))
2877 return flag_trapping_math;
2878 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2879 return 1;
2880 if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
2881 {
2882 /* For CONST_VECTOR, return 1 if any element is or might be zero. */
2883 unsigned int n_elts;
2884 rtx op = XEXP (x, 1);
2885 if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
2886 {
2887 if (!CONST_VECTOR_DUPLICATE_P (op))
2888 return 1;
2889 for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
2890 if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
2891 return 1;
2892 }
2893 else
2894 for (unsigned i = 0; i < n_elts; i++)
2895 if (CONST_VECTOR_ELT (op, i) == const0_rtx)
2896 return 1;
2897 }
2898 break;
2899
2900 case EXPR_LIST:
2901 /* An EXPR_LIST is used to represent a function call. This
2902 certainly may trap. */
2903 return 1;
2904
2905 case GE:
2906 case GT:
2907 case LE:
2908 case LT:
2909 case LTGT:
2910 case COMPARE:
2911 /* Some floating point comparisons may trap. */
2912 if (!flag_trapping_math)
2913 break;
2914 /* ??? There is no machine independent way to check for tests that trap
2915 when COMPARE is used, though many targets do make this distinction.
2916 For instance, sparc uses CCFPE for compares which generate exceptions
2917 and CCFP for compares which do not generate exceptions. */
2918 if (HONOR_NANS (x))
2919 return 1;
2920 /* But often the compare has some CC mode, so check operand
2921 modes as well. */
2922 if (HONOR_NANS (XEXP (x, 0))
2923 || HONOR_NANS (XEXP (x, 1)))
2924 return 1;
2925 break;
2926
2927 case EQ:
2928 case NE:
2929 if (HONOR_SNANS (x))
2930 return 1;
2931 /* Often comparison is CC mode, so check operand modes. */
2932 if (HONOR_SNANS (XEXP (x, 0))
2933 || HONOR_SNANS (XEXP (x, 1)))
2934 return 1;
2935 break;
2936
2937 case FIX:
2938 /* Conversion of floating point might trap. */
2939 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2940 return 1;
2941 break;
2942
2943 case NEG:
2944 case ABS:
2945 case SUBREG:
2946 case VEC_MERGE:
2947 case VEC_SELECT:
2948 case VEC_CONCAT:
2949 case VEC_DUPLICATE:
2950 /* These operations don't trap even with floating point. */
2951 break;
2952
2953 default:
2954 /* Any floating arithmetic may trap. */
2955 if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2956 return 1;
2957 }
2958
2959 fmt = GET_RTX_FORMAT (code);
2960 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2961 {
2962 if (fmt[i] == 'e')
2963 {
2964 if (may_trap_p_1 (XEXP (x, i), flags))
2965 return 1;
2966 }
2967 else if (fmt[i] == 'E')
2968 {
2969 int j;
2970 for (j = 0; j < XVECLEN (x, i); j++)
2971 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2972 return 1;
2973 }
2974 }
2975 return 0;
2976 }
2977
2978 /* Return nonzero if evaluating rtx X might cause a trap. */
2979
2980 int
2981 may_trap_p (const_rtx x)
2982 {
2983 return may_trap_p_1 (x, 0);
2984 }
2985
2986 /* Same as above, but additionally return nonzero if evaluating rtx X might
2987 cause a fault. We define a fault for the purpose of this function as a
2988 erroneous execution condition that cannot be encountered during the normal
2989 execution of a valid program; the typical example is an unaligned memory
2990 access on a strict alignment machine. The compiler guarantees that it
2991 doesn't generate code that will fault from a valid program, but this
2992 guarantee doesn't mean anything for individual instructions. Consider
2993 the following example:
2994
2995 struct S { int d; union { char *cp; int *ip; }; };
2996
2997 int foo(struct S *s)
2998 {
2999 if (s->d == 1)
3000 return *s->ip;
3001 else
3002 return *s->cp;
3003 }
3004
3005 on a strict alignment machine. In a valid program, foo will never be
3006 invoked on a structure for which d is equal to 1 and the underlying
3007 unique field of the union not aligned on a 4-byte boundary, but the
3008 expression *s->ip might cause a fault if considered individually.
3009
3010 At the RTL level, potentially problematic expressions will almost always
3011 satisfy may_trap_p; for example, the above dereference can be emitted as
3012 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3013 However, suppose that foo is inlined in a caller that causes s->cp to
3014 point to a local character variable and guarantees that s->d is not set
3015 to 1; foo may have been effectively translated into pseudo-RTL as:
3016
3017 if ((reg:SI) == 1)
3018 (set (reg:SI) (mem:SI (%fp - 7)))
3019 else
3020 (set (reg:QI) (mem:QI (%fp - 7)))
3021
3022 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3023 memory reference to a stack slot, but it will certainly cause a fault
3024 on a strict alignment machine. */
3025
3026 int
3027 may_trap_or_fault_p (const_rtx x)
3028 {
3029 return may_trap_p_1 (x, 1);
3030 }
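/* Usage sketch (illustrative, not part of the original source): may_trap_p
   is the usual test when an expression stays in its original context but
   may be evaluated speculatively, while may_trap_or_fault_p is needed when
   the code may also be moved to a context with weaker alignment or
   validity guarantees:

     if (!may_trap_or_fault_p (PATTERN (insn)))
       hoist_insn (insn);

   hoist_insn is a hypothetical caller.  */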
3031 \f
3032 /* Return nonzero if X contains a comparison that is not either EQ or NE,
3033 i.e., an inequality. */
3034
3035 int
3036 inequality_comparisons_p (const_rtx x)
3037 {
3038 const char *fmt;
3039 int len, i;
3040 const enum rtx_code code = GET_CODE (x);
3041
3042 switch (code)
3043 {
3044 case REG:
3045 case SCRATCH:
3046 case PC:
3047 case CC0:
3048 CASE_CONST_ANY:
3049 case CONST:
3050 case LABEL_REF:
3051 case SYMBOL_REF:
3052 return 0;
3053
3054 case LT:
3055 case LTU:
3056 case GT:
3057 case GTU:
3058 case LE:
3059 case LEU:
3060 case GE:
3061 case GEU:
3062 return 1;
3063
3064 default:
3065 break;
3066 }
3067
3068 len = GET_RTX_LENGTH (code);
3069 fmt = GET_RTX_FORMAT (code);
3070
3071 for (i = 0; i < len; i++)
3072 {
3073 if (fmt[i] == 'e')
3074 {
3075 if (inequality_comparisons_p (XEXP (x, i)))
3076 return 1;
3077 }
3078 else if (fmt[i] == 'E')
3079 {
3080 int j;
3081 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3082 if (inequality_comparisons_p (XVECEXP (x, i, j)))
3083 return 1;
3084 }
3085 }
3086
3087 return 0;
3088 }
3089 \f
3090 /* Replace any occurrence of FROM in X with TO.  The function does
3091 not descend into CONST_DOUBLE when replacing.
3092
3093 Note that copying is not done so X must not be shared unless all copies
3094 are to be modified.
3095
3096 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3097 those pointer-equal ones. */
3098
3099 rtx
3100 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3101 {
3102 int i, j;
3103 const char *fmt;
3104
3105 if (x == from)
3106 return to;
3107
3108 /* Allow this function to make replacements in EXPR_LISTs. */
3109 if (x == 0)
3110 return 0;
3111
3112 if (all_regs
3113 && REG_P (x)
3114 && REG_P (from)
3115 && REGNO (x) == REGNO (from))
3116 {
3117 gcc_assert (GET_MODE (x) == GET_MODE (from));
3118 return to;
3119 }
3120 else if (GET_CODE (x) == SUBREG)
3121 {
3122 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3123
3124 if (CONST_INT_P (new_rtx))
3125 {
3126 x = simplify_subreg (GET_MODE (x), new_rtx,
3127 GET_MODE (SUBREG_REG (x)),
3128 SUBREG_BYTE (x));
3129 gcc_assert (x);
3130 }
3131 else
3132 SUBREG_REG (x) = new_rtx;
3133
3134 return x;
3135 }
3136 else if (GET_CODE (x) == ZERO_EXTEND)
3137 {
3138 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3139
3140 if (CONST_INT_P (new_rtx))
3141 {
3142 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3143 new_rtx, GET_MODE (XEXP (x, 0)));
3144 gcc_assert (x);
3145 }
3146 else
3147 XEXP (x, 0) = new_rtx;
3148
3149 return x;
3150 }
3151
3152 fmt = GET_RTX_FORMAT (GET_CODE (x));
3153 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3154 {
3155 if (fmt[i] == 'e')
3156 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3157 else if (fmt[i] == 'E')
3158 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3159 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3160 from, to, all_regs);
3161 }
3162
3163 return x;
3164 }
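/* Usage sketch (illustrative, not part of the original source): because
   replace_rtx modifies X destructively, shared RTL must be copied first:

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, pseudo_reg, const_value, false);

   pseudo_reg and const_value are hypothetical.  */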
3165 \f
3166 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3167 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3168
3169 void
3170 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3171 {
3172 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3173 rtx x = *loc;
3174 if (JUMP_TABLE_DATA_P (x))
3175 {
3176 x = PATTERN (x);
3177 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3178 int len = GET_NUM_ELEM (vec);
3179 for (int i = 0; i < len; ++i)
3180 {
3181 rtx ref = RTVEC_ELT (vec, i);
3182 if (XEXP (ref, 0) == old_label)
3183 {
3184 XEXP (ref, 0) = new_label;
3185 if (update_label_nuses)
3186 {
3187 ++LABEL_NUSES (new_label);
3188 --LABEL_NUSES (old_label);
3189 }
3190 }
3191 }
3192 return;
3193 }
3194
3195 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3196 field. This is not handled by the iterator because it doesn't
3197 handle unprinted ('0') fields. */
3198 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3199 JUMP_LABEL (x) = new_label;
3200
3201 subrtx_ptr_iterator::array_type array;
3202 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3203 {
3204 rtx *loc = *iter;
3205 if (rtx x = *loc)
3206 {
3207 if (GET_CODE (x) == SYMBOL_REF
3208 && CONSTANT_POOL_ADDRESS_P (x))
3209 {
3210 rtx c = get_pool_constant (x);
3211 if (rtx_referenced_p (old_label, c))
3212 {
3213 /* Create a copy of constant C; replace the label inside
3214 but do not update LABEL_NUSES because uses in constant pool
3215 are not counted. */
3216 rtx new_c = copy_rtx (c);
3217 replace_label (&new_c, old_label, new_label, false);
3218
3219 /* Add the new constant NEW_C to constant pool and replace
3220 the old reference to constant by new reference. */
3221 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3222 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3223 }
3224 }
3225
3226 if ((GET_CODE (x) == LABEL_REF
3227 || GET_CODE (x) == INSN_LIST)
3228 && XEXP (x, 0) == old_label)
3229 {
3230 XEXP (x, 0) = new_label;
3231 if (update_label_nuses)
3232 {
3233 ++LABEL_NUSES (new_label);
3234 --LABEL_NUSES (old_label);
3235 }
3236 }
3237 }
3238 }
3239 }
3240
3241 void
3242 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3243 rtx_insn *new_label, bool update_label_nuses)
3244 {
3245 rtx insn_as_rtx = insn;
3246 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3247 gcc_checking_assert (insn_as_rtx == insn);
3248 }
3249
3250 /* Return true if X is referenced in BODY. */
3251
3252 bool
3253 rtx_referenced_p (const_rtx x, const_rtx body)
3254 {
3255 subrtx_iterator::array_type array;
3256 FOR_EACH_SUBRTX (iter, array, body, ALL)
3257 if (const_rtx y = *iter)
3258 {
3259 /* Check if a label_ref Y refers to label X. */
3260 if (GET_CODE (y) == LABEL_REF
3261 && LABEL_P (x)
3262 && label_ref_label (y) == x)
3263 return true;
3264
3265 if (rtx_equal_p (x, y))
3266 return true;
3267
3268 /* If Y is a reference to pool constant traverse the constant. */
3269 if (GET_CODE (y) == SYMBOL_REF
3270 && CONSTANT_POOL_ADDRESS_P (y))
3271 iter.substitute (get_pool_constant (y));
3272 }
3273 return false;
3274 }
3275
3276 /* If INSN is a tablejump, return true and store the label (which precedes the
3277 jump table) in *LABELP and the jump table in *TABLEP; both may be NULL.  */
3278
3279 bool
3280 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3281 rtx_jump_table_data **tablep)
3282 {
3283 if (!JUMP_P (insn))
3284 return false;
3285
3286 rtx target = JUMP_LABEL (insn);
3287 if (target == NULL_RTX || ANY_RETURN_P (target))
3288 return false;
3289
3290 rtx_insn *label = as_a<rtx_insn *> (target);
3291 rtx_insn *table = next_insn (label);
3292 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3293 return false;
3294
3295 if (labelp)
3296 *labelp = label;
3297 if (tablep)
3298 *tablep = as_a <rtx_jump_table_data *> (table);
3299 return true;
3300 }
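/* Usage sketch (illustrative, not part of the original source): iterating
   over the case labels of a tablejump:

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
         rtvec labels = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (labels); i++)
           visit_label (XEXP (RTVEC_ELT (labels, i), 0));
       }

   visit_label is a hypothetical per-label action.  */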
3301
3302 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3303 CASESI. Return the insn pattern if so, NULL_RTX otherwise. */
3304
3305 rtx
3306 tablejump_casesi_pattern (const rtx_insn *insn)
3307 {
3308 rtx tmp;
3309
3310 if ((tmp = single_set (insn)) != NULL
3311 && SET_DEST (tmp) == pc_rtx
3312 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3313 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3314 return tmp;
3315
3316 return NULL_RTX;
3317 }
3318
3319 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3320 constant that is not in the constant pool and not in the condition
3321 of an IF_THEN_ELSE. */
3322
3323 static int
3324 computed_jump_p_1 (const_rtx x)
3325 {
3326 const enum rtx_code code = GET_CODE (x);
3327 int i, j;
3328 const char *fmt;
3329
3330 switch (code)
3331 {
3332 case LABEL_REF:
3333 case PC:
3334 return 0;
3335
3336 case CONST:
3337 CASE_CONST_ANY:
3338 case SYMBOL_REF:
3339 case REG:
3340 return 1;
3341
3342 case MEM:
3343 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3344 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3345
3346 case IF_THEN_ELSE:
3347 return (computed_jump_p_1 (XEXP (x, 1))
3348 || computed_jump_p_1 (XEXP (x, 2)));
3349
3350 default:
3351 break;
3352 }
3353
3354 fmt = GET_RTX_FORMAT (code);
3355 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3356 {
3357 if (fmt[i] == 'e'
3358 && computed_jump_p_1 (XEXP (x, i)))
3359 return 1;
3360
3361 else if (fmt[i] == 'E')
3362 for (j = 0; j < XVECLEN (x, i); j++)
3363 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3364 return 1;
3365 }
3366
3367 return 0;
3368 }
3369
3370 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3371
3372 Tablejumps and casesi insns are not considered indirect jumps;
3373 we can recognize them by a (use (label_ref)). */
3374
3375 int
3376 computed_jump_p (const rtx_insn *insn)
3377 {
3378 int i;
3379 if (JUMP_P (insn))
3380 {
3381 rtx pat = PATTERN (insn);
3382
3383 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3384 if (JUMP_LABEL (insn) != NULL)
3385 return 0;
3386
3387 if (GET_CODE (pat) == PARALLEL)
3388 {
3389 int len = XVECLEN (pat, 0);
3390 int has_use_labelref = 0;
3391
3392 for (i = len - 1; i >= 0; i--)
3393 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3394 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3395 == LABEL_REF))
3396 {
3397 has_use_labelref = 1;
3398 break;
3399 }
3400
3401 if (! has_use_labelref)
3402 for (i = len - 1; i >= 0; i--)
3403 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3404 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3405 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3406 return 1;
3407 }
3408 else if (GET_CODE (pat) == SET
3409 && SET_DEST (pat) == pc_rtx
3410 && computed_jump_p_1 (SET_SRC (pat)))
3411 return 1;
3412 }
3413 return 0;
3414 }
3415
3416 \f
3417
3418 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3419 the equivalent add insn and pass the result to FN, using DATA as the
3420 final argument. */
3421
3422 static int
3423 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3424 {
3425 rtx x = XEXP (mem, 0);
3426 switch (GET_CODE (x))
3427 {
3428 case PRE_INC:
3429 case POST_INC:
3430 {
3431 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3432 rtx r1 = XEXP (x, 0);
3433 rtx c = gen_int_mode (size, GET_MODE (r1));
3434 return fn (mem, x, r1, r1, c, data);
3435 }
3436
3437 case PRE_DEC:
3438 case POST_DEC:
3439 {
3440 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3441 rtx r1 = XEXP (x, 0);
3442 rtx c = gen_int_mode (-size, GET_MODE (r1));
3443 return fn (mem, x, r1, r1, c, data);
3444 }
3445
3446 case PRE_MODIFY:
3447 case POST_MODIFY:
3448 {
3449 rtx r1 = XEXP (x, 0);
3450 rtx add = XEXP (x, 1);
3451 return fn (mem, x, r1, add, NULL, data);
3452 }
3453
3454 default:
3455 gcc_unreachable ();
3456 }
3457 }
3458
3459 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3460 For each such autoinc operation found, call FN, passing it
3461 the innermost enclosing MEM, the operation itself, the RTX modified
3462 by the operation, two RTXs (the second may be NULL) that, once
3463 added, represent the value to be held by the modified RTX
3464 afterwards, and DATA. FN is to return 0 to continue the
3465 traversal or any other value to have it returned to the caller of
3466 for_each_inc_dec. */
3467
3468 int
3469 for_each_inc_dec (rtx x,
3470 for_each_inc_dec_fn fn,
3471 void *data)
3472 {
3473 subrtx_var_iterator::array_type array;
3474 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3475 {
3476 rtx mem = *iter;
3477 if (mem
3478 && MEM_P (mem)
3479 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3480 {
3481 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3482 if (res != 0)
3483 return res;
3484 iter.skip_subrtxes ();
3485 }
3486 }
3487 return 0;
3488 }
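/* Usage sketch (illustrative, not part of the original source): a callback
   matching for_each_inc_dec_fn receives the MEM, the autoinc expression,
   the modified register and the operands of the equivalent add, as in the
   calls above.  The names below are hypothetical.

     static int
     note_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                   rtx dest, rtx src ATTRIBUTE_UNUSED,
                   rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       bitmap_set_bit ((bitmap) data, REGNO (dest));
       return 0;
     }

     for_each_inc_dec (PATTERN (insn), note_autoinc, modified_regs);  */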
3489
3490 \f
3491 /* Searches X for any reference to REGNO, returning the rtx of the
3492 reference found if any. Otherwise, returns NULL_RTX. */
3493
3494 rtx
3495 regno_use_in (unsigned int regno, rtx x)
3496 {
3497 const char *fmt;
3498 int i, j;
3499 rtx tem;
3500
3501 if (REG_P (x) && REGNO (x) == regno)
3502 return x;
3503
3504 fmt = GET_RTX_FORMAT (GET_CODE (x));
3505 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3506 {
3507 if (fmt[i] == 'e')
3508 {
3509 if ((tem = regno_use_in (regno, XEXP (x, i))))
3510 return tem;
3511 }
3512 else if (fmt[i] == 'E')
3513 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3514 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3515 return tem;
3516 }
3517
3518 return NULL_RTX;
3519 }
3520
3521 /* Return a value indicating whether OP, an operand of a commutative
3522 operation, is preferred as the first or second operand. The more
3523 positive the value, the stronger the preference for being the first
3524 operand. */
3525
3526 int
3527 commutative_operand_precedence (rtx op)
3528 {
3529 enum rtx_code code = GET_CODE (op);
3530
3531 /* Constants always become the second operand. Prefer "nice" constants. */
3532 if (code == CONST_INT)
3533 return -10;
3534 if (code == CONST_WIDE_INT)
3535 return -9;
3536 if (code == CONST_POLY_INT)
3537 return -8;
3538 if (code == CONST_DOUBLE)
3539 return -8;
3540 if (code == CONST_FIXED)
3541 return -8;
3542 op = avoid_constant_pool_reference (op);
3543 code = GET_CODE (op);
3544
3545 switch (GET_RTX_CLASS (code))
3546 {
3547 case RTX_CONST_OBJ:
3548 if (code == CONST_INT)
3549 return -7;
3550 if (code == CONST_WIDE_INT)
3551 return -6;
3552 if (code == CONST_POLY_INT)
3553 return -5;
3554 if (code == CONST_DOUBLE)
3555 return -5;
3556 if (code == CONST_FIXED)
3557 return -5;
3558 return -4;
3559
3560 case RTX_EXTRA:
3561 /* SUBREGs of objects should come second. */
3562 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3563 return -3;
3564 return 0;
3565
3566 case RTX_OBJ:
3567 /* Complex expressions should come first, so decrease the priority
3568 of objects.  Prefer pointer objects over non-pointer objects.  */
3569 if ((REG_P (op) && REG_POINTER (op))
3570 || (MEM_P (op) && MEM_POINTER (op)))
3571 return -1;
3572 return -2;
3573
3574 case RTX_COMM_ARITH:
3575 /* Prefer operands that are themselves commutative to be first.
3576 This helps to make things linear. In particular,
3577 (and (and (reg) (reg)) (not (reg))) is canonical. */
3578 return 4;
3579
3580 case RTX_BIN_ARITH:
3581 /* If only one operand is a binary expression, it will be the first
3582 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3583 is canonical, although it will usually be further simplified. */
3584 return 2;
3585
3586 case RTX_UNARY:
3587 /* Then prefer NEG and NOT. */
3588 if (code == NEG || code == NOT)
3589 return 1;
3590 /* FALLTHRU */
3591
3592 default:
3593 return 0;
3594 }
3595 }
3596
3597 /* Return 1 iff it is necessary to swap the operands of a commutative
3598 operation in order to canonicalize the expression.  */
3599
3600 bool
3601 swap_commutative_operands_p (rtx x, rtx y)
3602 {
3603 return (commutative_operand_precedence (x)
3604 < commutative_operand_precedence (y));
3605 }
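/* Usage sketch (illustrative, not part of the original source): code that
   builds a commutative expression typically canonicalizes the operand
   order first, in the same way as simplify_gen_binary:

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);
     rtx sum = gen_rtx_PLUS (mode, op0, op1);  */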
3606
3607 /* Return 1 if X is an autoincrement side effect and the register is
3608 not the stack pointer. */
3609 int
3610 auto_inc_p (const_rtx x)
3611 {
3612 switch (GET_CODE (x))
3613 {
3614 case PRE_INC:
3615 case POST_INC:
3616 case PRE_DEC:
3617 case POST_DEC:
3618 case PRE_MODIFY:
3619 case POST_MODIFY:
3620 /* There are no REG_INC notes for SP. */
3621 if (XEXP (x, 0) != stack_pointer_rtx)
3622 return 1;
3623 default:
3624 break;
3625 }
3626 return 0;
3627 }
3628
3629 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3630 int
3631 loc_mentioned_in_p (rtx *loc, const_rtx in)
3632 {
3633 enum rtx_code code;
3634 const char *fmt;
3635 int i, j;
3636
3637 if (!in)
3638 return 0;
3639
3640 code = GET_CODE (in);
3641 fmt = GET_RTX_FORMAT (code);
3642 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3643 {
3644 if (fmt[i] == 'e')
3645 {
3646 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3647 return 1;
3648 }
3649 else if (fmt[i] == 'E')
3650 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3651 if (loc == &XVECEXP (in, i, j)
3652 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3653 return 1;
3654 }
3655 return 0;
3656 }
3657
3658 /* Reinterpret a subreg as a bit extraction from an integer and return
3659 the position of the least significant bit of the extracted value.
3660 In other words, if the extraction were performed as a shift right
3661 and mask, return the number of bits to shift right.
3662
3663 The outer value of the subreg has OUTER_BYTES bytes and starts at
3664 byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes. */
3665
3666 poly_uint64
3667 subreg_size_lsb (poly_uint64 outer_bytes,
3668 poly_uint64 inner_bytes,
3669 poly_uint64 subreg_byte)
3670 {
3671 poly_uint64 subreg_end, trailing_bytes, byte_pos;
3672
3673 /* A paradoxical subreg begins at bit position 0. */
3674 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3675 if (maybe_gt (outer_bytes, inner_bytes))
3676 {
3677 gcc_checking_assert (known_eq (subreg_byte, 0U));
3678 return 0;
3679 }
3680
3681 subreg_end = subreg_byte + outer_bytes;
3682 trailing_bytes = inner_bytes - subreg_end;
3683 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3684 byte_pos = trailing_bytes;
3685 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3686 byte_pos = subreg_byte;
3687 else
3688 {
3689 /* When bytes and words have opposite endianness, we must be able
3690 to split offsets into words and bytes at compile time. */
3691 poly_uint64 leading_word_part
3692 = force_align_down (subreg_byte, UNITS_PER_WORD);
3693 poly_uint64 trailing_word_part
3694 = force_align_down (trailing_bytes, UNITS_PER_WORD);
3695 /* If the subreg crosses a word boundary ensure that
3696 it also begins and ends on a word boundary. */
3697 gcc_assert (known_le (subreg_end - leading_word_part,
3698 (unsigned int) UNITS_PER_WORD)
3699 || (known_eq (leading_word_part, subreg_byte)
3700 && known_eq (trailing_word_part, trailing_bytes)));
3701 if (WORDS_BIG_ENDIAN)
3702 byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3703 else
3704 byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3705 }
3706
3707 return byte_pos * BITS_PER_UNIT;
3708 }
3709
3710 /* Given a subreg X, return the bit offset where the subreg begins
3711 (counting from the least significant bit of the reg). */
3712
3713 poly_uint64
3714 subreg_lsb (const_rtx x)
3715 {
3716 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3717 SUBREG_BYTE (x));
3718 }
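/* Worked example (illustrative, not part of the original source): for
   (subreg:SI (reg:DI R) 4), outer_bytes = 4, inner_bytes = 8 and
   subreg_byte = 4.  On a little-endian target the extracted value starts
   at bit 32 of the DImode register, so subreg_lsb returns 32; on a
   big-endian target the same byte offset selects the low-order half and
   subreg_lsb returns 0.  */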
3719
3720 /* Return the subreg byte offset for a subreg whose outer value has
3721 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3722 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3723 lsb of the inner value. This is the inverse of the calculation
3724 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3725
3726 poly_uint64
3727 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3728 poly_uint64 lsb_shift)
3729 {
3730 /* A paradoxical subreg begins at bit position 0. */
3731 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3732 if (maybe_gt (outer_bytes, inner_bytes))
3733 {
3734 gcc_checking_assert (known_eq (lsb_shift, 0U));
3735 return 0;
3736 }
3737
3738 poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3739 poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3740 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3741 return upper_bytes;
3742 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3743 return lower_bytes;
3744 else
3745 {
3746 /* When bytes and words have opposite endianness, we must be able
3747 to split offsets into words and bytes at compile time. */
3748 poly_uint64 lower_word_part = force_align_down (lower_bytes,
3749 UNITS_PER_WORD);
3750 poly_uint64 upper_word_part = force_align_down (upper_bytes,
3751 UNITS_PER_WORD);
3752 if (WORDS_BIG_ENDIAN)
3753 return upper_word_part + (lower_bytes - lower_word_part);
3754 else
3755 return lower_word_part + (upper_bytes - upper_word_part);
3756 }
3757 }
3758
3759 /* Fill in information about a subreg of a hard register.
3760 xregno - A regno of an inner hard subreg_reg (or what will become one).
3761 xmode - The mode of xregno.
3762 offset - The byte offset.
3763 ymode - The mode of a top level SUBREG (or what may become one).
3764 info - Pointer to structure to fill in.
3765
3766 Rather than considering one particular inner register (and thus one
3767 particular "outer" register) in isolation, this function really uses
3768 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3769 function does not check whether adding INFO->offset to XREGNO gives
3770 a valid hard register; even if INFO->offset + XREGNO is out of range,
3771 there might be another register of the same type that is in range.
3772 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3773 the new register, since that can depend on things like whether the final
3774 register number is even or odd. Callers that want to check whether
3775 this particular subreg can be replaced by a simple (reg ...) should
3776 use simplify_subreg_regno. */
3777
3778 void
3779 subreg_get_info (unsigned int xregno, machine_mode xmode,
3780 poly_uint64 offset, machine_mode ymode,
3781 struct subreg_info *info)
3782 {
3783 unsigned int nregs_xmode, nregs_ymode;
3784
3785 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3786
3787 poly_uint64 xsize = GET_MODE_SIZE (xmode);
3788 poly_uint64 ysize = GET_MODE_SIZE (ymode);
3789
3790 bool rknown = false;
3791
3792 /* If the register representation of a non-scalar mode has holes in it,
3793 we expect the scalar units to be concatenated together, with the holes
3794 distributed evenly among the scalar units. Each scalar unit must occupy
3795 at least one register. */
3796 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3797 {
3798 /* As a consequence, we must be dealing with a constant number of
3799 scalars, and thus a constant offset and number of units. */
3800 HOST_WIDE_INT coffset = offset.to_constant ();
3801 HOST_WIDE_INT cysize = ysize.to_constant ();
3802 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3803 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3804 scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3805 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3806 gcc_assert (nregs_xmode
3807 == (nunits
3808 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3809 gcc_assert (hard_regno_nregs (xregno, xmode)
3810 == hard_regno_nregs (xregno, xmode_unit) * nunits);
3811
3812 /* You can only ask for a SUBREG of a value with holes in the middle
3813 if you don't cross the holes. (Such a SUBREG should be done by
3814 picking a different register class, or doing it in memory if
3815 necessary.) An example of a value with holes is XCmode on 32-bit
3816 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3817 3 for each part, but in memory it's two 128-bit parts.
3818 Padding is assumed to be at the end (not necessarily the 'high part')
3819 of each unit. */
3820 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3821 && (coffset / GET_MODE_SIZE (xmode_unit)
3822 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3823 {
3824 info->representable_p = false;
3825 rknown = true;
3826 }
3827 }
3828 else
3829 nregs_xmode = hard_regno_nregs (xregno, xmode);
3830
3831 nregs_ymode = hard_regno_nregs (xregno, ymode);
3832
3833 /* Subreg sizes must be ordered, so that we can tell whether they are
3834 partial, paradoxical or complete. */
3835 gcc_checking_assert (ordered_p (xsize, ysize));
3836
3837 /* Paradoxical subregs are otherwise valid. */
3838 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3839 {
3840 info->representable_p = true;
3841 /* If this is a big endian paradoxical subreg, which uses more
3842 actual hard registers than the original register, we must
3843 return a negative offset so that we find the proper highpart
3844 of the register.
3845
3846 We assume that the ordering of registers within a multi-register
3847 value has a consistent endianness: if bytes and register words
3848 have different endianness, the hard registers that make up a
3849 multi-register value must be at least word-sized. */
3850 if (REG_WORDS_BIG_ENDIAN)
3851 info->offset = (int) nregs_xmode - (int) nregs_ymode;
3852 else
3853 info->offset = 0;
3854 info->nregs = nregs_ymode;
3855 return;
3856 }
3857
3858 /* If registers store different numbers of bits in the different
3859 modes, we cannot generally form this subreg. */
3860 poly_uint64 regsize_xmode, regsize_ymode;
3861 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3862 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3863 && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3864 && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3865 {
3866 if (!rknown
3867 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3868 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3869 {
3870 info->representable_p = false;
3871 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3872 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3873 /* Checked by validate_subreg. We must know at compile time
3874 which inner registers are being accessed. */
3875 gcc_unreachable ();
3876 return;
3877 }
3878 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3879 would go outside of XMODE. */
3880 if (!rknown && maybe_gt (ysize + offset, xsize))
3881 {
3882 info->representable_p = false;
3883 info->nregs = nregs_ymode;
3884 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3885 /* Checked by validate_subreg. We must know at compile time
3886 which inner registers are being accessed. */
3887 gcc_unreachable ();
3888 return;
3889 }
3890 /* Quick exit for the simple and common case of extracting whole
3891 subregisters from a multiregister value. */
3892 /* ??? It would be better to integrate this into the code below,
3893 if we can generalize the concept enough and figure out how
3894 odd-sized modes can coexist with the other weird cases we support. */
3895 HOST_WIDE_INT count;
3896 if (!rknown
3897 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3898 && known_eq (regsize_xmode, regsize_ymode)
3899 && constant_multiple_p (offset, regsize_ymode, &count))
3900 {
3901 info->representable_p = true;
3902 info->nregs = nregs_ymode;
3903 info->offset = count;
3904 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3905 return;
3906 }
3907 }
3908
3909 /* Lowpart subregs are otherwise valid. */
3910 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3911 {
3912 info->representable_p = true;
3913 rknown = true;
3914
3915 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3916 {
3917 info->offset = 0;
3918 info->nregs = nregs_ymode;
3919 return;
3920 }
3921 }
3922
3923 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3924 values there are in (reg:XMODE XREGNO). We can view the register
3925 as consisting of this number of independent "blocks", where each
3926 block occupies NREGS_YMODE registers and contains exactly one
3927 representable YMODE value. */
3928 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3929 unsigned int num_blocks = nregs_xmode / nregs_ymode;
3930
3931 /* Calculate the number of bytes in each block. This must always
3932 be exact, otherwise we don't know how to verify the constraint.
3933 These conditions may be relaxed but subreg_regno_offset would
3934 need to be redesigned. */
3935 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3936
3937 /* Get the number of the first block that contains the subreg and the byte
3938 offset of the subreg from the start of that block. */
3939 unsigned int block_number;
3940 poly_uint64 subblock_offset;
3941 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3942 &subblock_offset))
3943 /* Checked by validate_subreg. We must know at compile time which
3944 inner registers are being accessed. */
3945 gcc_unreachable ();
3946
3947 if (!rknown)
3948 {
3949 /* Only the lowpart of each block is representable. */
3950 info->representable_p
3951 = known_eq (subblock_offset,
3952 subreg_size_lowpart_offset (ysize, bytes_per_block));
3953 rknown = true;
3954 }
3955
3956 /* We assume that the ordering of registers within a multi-register
3957 value has a consistent endianness: if bytes and register words
3958 have different endianness, the hard registers that make up a
3959 multi-register value must be at least word-sized. */
3960 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3961 /* The block number we calculated above followed memory endianness.
3962 Convert it to register endianness by counting back from the end.
3963 (Note that, because of the assumption above, each block must be
3964 at least word-sized.) */
3965 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3966 else
3967 info->offset = block_number * nregs_ymode;
3968 info->nregs = nregs_ymode;
3969 }
3970
3971 /* This function returns the regno offset of a subreg expression.
3972 xregno - A regno of an inner hard subreg_reg (or what will become one).
3973 xmode - The mode of xregno.
3974 offset - The byte offset.
3975 ymode - The mode of a top level SUBREG (or what may become one).
3976 RETURN - The regno offset which would be used. */
3977 unsigned int
3978 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3979 poly_uint64 offset, machine_mode ymode)
3980 {
3981 struct subreg_info info;
3982 subreg_get_info (xregno, xmode, offset, ymode, &info);
3983 return info.offset;
3984 }
3985
3986 /* This function returns true when the offset is representable via
3987 subreg_offset in the given regno.
3988 xregno - A regno of an inner hard subreg_reg (or what will become one).
3989 xmode - The mode of xregno.
3990 offset - The byte offset.
3991 ymode - The mode of a top level SUBREG (or what may become one).
3992 RETURN - Whether the offset is representable. */
3993 bool
3994 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3995 poly_uint64 offset, machine_mode ymode)
3996 {
3997 struct subreg_info info;
3998 subreg_get_info (xregno, xmode, offset, ymode, &info);
3999 return info.representable_p;
4000 }
4001
4002 /* Return the number of a YMODE register to which
4003
4004 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
4005
4006 can be simplified. Return -1 if the subreg can't be simplified.
4007
4008 XREGNO is a hard register number. */
4009
4010 int
4011 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
4012 poly_uint64 offset, machine_mode ymode)
4013 {
4014 struct subreg_info info;
4015 unsigned int yregno;
4016
4017 /* Give the backend a chance to disallow the mode change. */
4018 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
4019 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
4020 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
4021 /* We can use mode change in LRA for some transformations. */
4022 && ! lra_in_progress)
4023 return -1;
4024
4025 /* We shouldn't simplify stack-related registers. */
4026 if ((!reload_completed || frame_pointer_needed)
4027 && xregno == FRAME_POINTER_REGNUM)
4028 return -1;
4029
4030 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4031 && xregno == ARG_POINTER_REGNUM)
4032 return -1;
4033
4034 if (xregno == STACK_POINTER_REGNUM
4035 /* We should convert the hard stack register in LRA if it is
4036 possible. */
4037 && ! lra_in_progress)
4038 return -1;
4039
4040 /* Try to get the register offset. */
4041 subreg_get_info (xregno, xmode, offset, ymode, &info);
4042 if (!info.representable_p)
4043 return -1;
4044
4045 /* Make sure that the offsetted register value is in range. */
4046 yregno = xregno + info.offset;
4047 if (!HARD_REGISTER_NUM_P (yregno))
4048 return -1;
4049
4050 /* See whether (reg:YMODE YREGNO) is valid.
4051
4052 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4053 This is a kludge to work around how complex FP arguments are passed
4054 on IA-64 and should be fixed. See PR target/49226. */
4055 if (!targetm.hard_regno_mode_ok (yregno, ymode)
4056 && targetm.hard_regno_mode_ok (xregno, xmode))
4057 return -1;
4058
4059 return (int) yregno;
4060 }
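
/* A minimal usage sketch (illustrative only; REGNO and the modes are
   assumptions, not taken from any caller in this file):

       poly_uint64 off = subreg_lowpart_offset (SImode, DImode);
       int new_regno = simplify_subreg_regno (regno, DImode, off, SImode);
       if (new_regno >= 0)
         ... use (reg:SI new_regno) in place of the subreg ...

   A return value of -1 means the backend vetoed the mode change, the
   register is one of the stack-related registers excluded above, or the
   offset is not representable for this register.  */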
4061
4062 /* Return the final regno that a subreg expression refers to. */
4063 unsigned int
4064 subreg_regno (const_rtx x)
4065 {
4066 unsigned int ret;
4067 rtx subreg = SUBREG_REG (x);
4068 int regno = REGNO (subreg);
4069
4070 ret = regno + subreg_regno_offset (regno,
4071 GET_MODE (subreg),
4072 SUBREG_BYTE (x),
4073 GET_MODE (x));
4074 return ret;
4075
4076 }
4077
4078 /* Return the number of registers that a subreg expression refers
4079 to. */
4080 unsigned int
4081 subreg_nregs (const_rtx x)
4082 {
4083 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4084 }
4085
4086 /* Return the number of registers that a subreg expression with register
4087 number REGNO refers to. This is a copy of subreg_nregs above, changed
4088 so that the regno can be passed in. */
4089
4090 unsigned int
4091 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4092 {
4093 struct subreg_info info;
4094 rtx subreg = SUBREG_REG (x);
4095
4096 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4097 &info);
4098 return info.nregs;
4099 }
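
/* For example (hypothetical register numbers, on a 32-bit little-endian
   target): if hard register 10 holds a DImode value in two word-sized
   registers, then for X = (subreg:SI (reg:DI 10) 4), subreg_regno (X)
   is 11 and subreg_nregs (X) is 1, i.e. the subreg refers to the single
   hard register 11 that holds the high word.  */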
4100
4101 struct parms_set_data
4102 {
4103 int nregs;
4104 HARD_REG_SET regs;
4105 };
4106
4107 /* Helper function for noticing stores to parameter registers. */
4108 static void
4109 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4110 {
4111 struct parms_set_data *const d = (struct parms_set_data *) data;
4112 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4113 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4114 {
4115 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4116 d->nregs--;
4117 }
4118 }
4119
4120 /* Look backward for the first parameter to be loaded.
4121 Note that loads of all parameters will not necessarily be
4122 found if CSE has eliminated some of them (e.g., an argument
4123 to the outer function is passed down as a parameter).
4124 Do not skip BOUNDARY. */
4125 rtx_insn *
4126 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4127 {
4128 struct parms_set_data parm;
4129 rtx p;
4130 rtx_insn *before, *first_set;
4131
4132 /* Since different machines initialize their parameter registers
4133 in different orders, assume nothing. Collect the set of all
4134 parameter registers. */
4135 CLEAR_HARD_REG_SET (parm.regs);
4136 parm.nregs = 0;
4137 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4138 if (GET_CODE (XEXP (p, 0)) == USE
4139 && REG_P (XEXP (XEXP (p, 0), 0))
4140 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4141 {
4142 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4143
4144 /* We only care about registers which can hold function
4145 arguments. */
4146 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4147 continue;
4148
4149 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4150 parm.nregs++;
4151 }
4152 before = call_insn;
4153 first_set = call_insn;
4154
4155 /* Search backward for the first set of a register in this set. */
4156 while (parm.nregs && before != boundary)
4157 {
4158 before = PREV_INSN (before);
4159
4160 /* It is possible that some loads got CSEed from one call to
4161 another. Stop in that case. */
4162 if (CALL_P (before))
4163 break;
4164
4165 /* Our caller must either ensure that we will find all sets
4166 (in case the code has not been optimized yet), or take care
4167 of possible labels by setting BOUNDARY to the preceding
4168 CODE_LABEL. */
4169 if (LABEL_P (before))
4170 {
4171 gcc_assert (before == boundary);
4172 break;
4173 }
4174
4175 if (INSN_P (before))
4176 {
4177 int nregs_old = parm.nregs;
4178 note_stores (before, parms_set, &parm);
4179 /* If we found something that did not set a parameter reg,
4180 we're done. Do not keep going, as that might result
4181 in hoisting an insn before the setting of a pseudo
4182 that is used by the hoisted insn. */
4183 if (nregs_old != parm.nregs)
4184 first_set = before;
4185 else
4186 break;
4187 }
4188 }
4189 return first_set;
4190 }
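
/* As an illustration (assuming hypothetical argument registers 4 and 5),
   for a sequence such as

       insn A:  (set (reg:SI 5) (const_int 1))
       insn B:  (set (reg:SI 4) (mem:SI ...))
       call_insn C, whose CALL_INSN_FUNCTION_USAGE mentions
                (use (reg:SI 4)) and (use (reg:SI 5))

   find_first_parameter_load (C, BOUNDARY) walks backwards from C,
   removes registers 4 and 5 from the pending set as their stores are
   seen, and returns insn A, the earliest of the parameter loads.  */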
4191
4192 /* Return true if we should avoid inserting code between INSN and the
4193 preceding call instruction. */
4194
4195 bool
4196 keep_with_call_p (const rtx_insn *insn)
4197 {
4198 rtx set;
4199
4200 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4201 {
4202 if (REG_P (SET_DEST (set))
4203 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4204 && fixed_regs[REGNO (SET_DEST (set))]
4205 && general_operand (SET_SRC (set), VOIDmode))
4206 return true;
4207 if (REG_P (SET_SRC (set))
4208 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4209 && REG_P (SET_DEST (set))
4210 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4211 return true;
4212 /* There may be a stack pop just after the call and before the store
4213 of the return register. Search for the actual store when deciding
4214 if we can break or not. */
4215 if (SET_DEST (set) == stack_pointer_rtx)
4216 {
4217 /* This CONST_CAST is okay because next_nonnote_insn just
4218 returns its argument and we assign it to a const_rtx
4219 variable. */
4220 const rtx_insn *i2
4221 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4222 if (i2 && keep_with_call_p (i2))
4223 return true;
4224 }
4225 }
4226 return false;
4227 }
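
/* For instance, on a target whose return-value register is hard register 0
   and whose first pseudo number is below 100 (assumptions made only for
   this example), an insn such as

       (set (reg:SI 100) (reg:SI 0))

   just after a call satisfies the second test above (a copy of the return
   value into a pseudo) and so should be kept with the call, whereas an
   unrelated arithmetic insn would not be.  */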
4228
4229 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4230 to non-complex jumps. That is, direct unconditional, conditional,
4231 and tablejumps, but not computed jumps or returns. It also does
4232 not apply to the fallthru case of a conditional jump. */
4233
4234 bool
4235 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4236 {
4237 rtx tmp = JUMP_LABEL (jump_insn);
4238 rtx_jump_table_data *table;
4239
4240 if (label == tmp)
4241 return true;
4242
4243 if (tablejump_p (jump_insn, NULL, &table))
4244 {
4245 rtvec vec = table->get_labels ();
4246 int i, veclen = GET_NUM_ELEM (vec);
4247
4248 for (i = 0; i < veclen; ++i)
4249 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4250 return true;
4251 }
4252
4253 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4254 return true;
4255
4256 return false;
4257 }
4258
4259 \f
4260 /* Return an estimate of the cost of computing rtx X.
4261 One use is in cse, to decide which expression to keep in the hash table.
4262 Another is in rtl generation, to pick the cheapest way to multiply.
4263 Other uses like the latter are expected in the future.
4264
4265 X appears as operand OPNO in an expression with code OUTER_CODE.
4266 SPEED specifies whether costs optimized for speed or size should
4267 be returned. */
4268
4269 int
4270 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4271 int opno, bool speed)
4272 {
4273 int i, j;
4274 enum rtx_code code;
4275 const char *fmt;
4276 int total;
4277 int factor;
4278
4279 if (x == 0)
4280 return 0;
4281
4282 if (GET_MODE (x) != VOIDmode)
4283 mode = GET_MODE (x);
4284
4285 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4286 many insns, taking N times as long. */
4287 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4288 if (factor == 0)
4289 factor = 1;
4290
4291 /* Compute the default costs of certain things.
4292 Note that targetm.rtx_costs can override the defaults. */
4293
4294 code = GET_CODE (x);
4295 switch (code)
4296 {
4297 case MULT:
4298 /* Multiplication has time-complexity O(N*N), where N is the
4299 number of units (translated from digits) when using
4300 schoolbook long multiplication. */
4301 total = factor * factor * COSTS_N_INSNS (5);
4302 break;
4303 case DIV:
4304 case UDIV:
4305 case MOD:
4306 case UMOD:
4307 /* Similarly, complexity for schoolbook long division. */
4308 total = factor * factor * COSTS_N_INSNS (7);
4309 break;
4310 case USE:
4311 /* Used in combine.c as a marker. */
4312 total = 0;
4313 break;
4314 case SET:
4315 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4316 the mode for the factor. */
4317 mode = GET_MODE (SET_DEST (x));
4318 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4319 if (factor == 0)
4320 factor = 1;
4321 /* FALLTHRU */
4322 default:
4323 total = factor * COSTS_N_INSNS (1);
4324 }
4325
4326 switch (code)
4327 {
4328 case REG:
4329 return 0;
4330
4331 case SUBREG:
4332 total = 0;
4333 /* If we can't tie these modes, make this expensive. The larger
4334 the mode, the more expensive it is. */
4335 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4336 return COSTS_N_INSNS (2 + factor);
4337 break;
4338
4339 case TRUNCATE:
4340 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4341 {
4342 total = 0;
4343 break;
4344 }
4345 /* FALLTHRU */
4346 default:
4347 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4348 return total;
4349 break;
4350 }
4351
4352 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4353 which is already in total. */
4354
4355 fmt = GET_RTX_FORMAT (code);
4356 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4357 if (fmt[i] == 'e')
4358 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4359 else if (fmt[i] == 'E')
4360 for (j = 0; j < XVECLEN (x, i); j++)
4361 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4362
4363 return total;
4364 }
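
/* A worked example of the size factor (assuming a 32-bit target with
   UNITS_PER_WORD == 4 and no targetm.rtx_costs override): for a DImode
   MULT the factor is 2, so the default cost is 2 * 2 * COSTS_N_INSNS (5),
   to which rtx_cost then adds the costs of the two operands.  For a
   SImode REG the cost is simply 0.  */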
4365
4366 /* Fill in the structure C with information about both speed and size rtx
4367 costs for X, which is operand OPNO in an expression with code OUTER. */
4368
4369 void
4370 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4371 struct full_rtx_costs *c)
4372 {
4373 c->speed = rtx_cost (x, mode, outer, opno, true);
4374 c->size = rtx_cost (x, mode, outer, opno, false);
4375 }
4376
4377 \f
4378 /* Return the cost of address expression X.
4379 Expect that X is a properly formed address reference.
4380
4381 The SPEED parameter specifies whether costs optimized for speed or size
4382 should be returned. */
4383
4384 int
4385 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4386 {
4387 /* We may be asked for the cost of various unusual addresses, such as the
4388 operands of a push instruction. It is not worthwhile to complicate the
4389 target hook to handle such cases. */
4390
4391 if (!memory_address_addr_space_p (mode, x, as))
4392 return 1000;
4393
4394 return targetm.address_cost (x, mode, as, speed);
4395 }
4396
4397 /* If the target doesn't override, compute the cost as with arithmetic. */
4398
4399 int
4400 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4401 {
4402 return rtx_cost (x, Pmode, MEM, 0, speed);
4403 }
4404 \f
4405
4406 unsigned HOST_WIDE_INT
4407 nonzero_bits (const_rtx x, machine_mode mode)
4408 {
4409 if (mode == VOIDmode)
4410 mode = GET_MODE (x);
4411 scalar_int_mode int_mode;
4412 if (!is_a <scalar_int_mode> (mode, &int_mode))
4413 return GET_MODE_MASK (mode);
4414 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4415 }
4416
4417 unsigned int
4418 num_sign_bit_copies (const_rtx x, machine_mode mode)
4419 {
4420 if (mode == VOIDmode)
4421 mode = GET_MODE (x);
4422 scalar_int_mode int_mode;
4423 if (!is_a <scalar_int_mode> (mode, &int_mode))
4424 return 1;
4425 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4426 }
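
/* Two small examples of these entry points (X stands for an arbitrary
   SImode rtx; the values are purely illustrative):

       nonzero_bits (gen_rtx_AND (SImode, X, GEN_INT (0xff)), SImode)

   is at most 0xff, because the AND case below intersects the nonzero bits
   of the two operands, and

       num_sign_bit_copies (GEN_INT (-1), SImode)

   is 32, since every bit of the constant is a copy of its sign bit.  */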
4427
4428 /* Return true if nonzero_bits1 might recurse into both operands
4429 of X. */
4430
4431 static inline bool
4432 nonzero_bits_binary_arith_p (const_rtx x)
4433 {
4434 if (!ARITHMETIC_P (x))
4435 return false;
4436 switch (GET_CODE (x))
4437 {
4438 case AND:
4439 case XOR:
4440 case IOR:
4441 case UMIN:
4442 case UMAX:
4443 case SMIN:
4444 case SMAX:
4445 case PLUS:
4446 case MINUS:
4447 case MULT:
4448 case DIV:
4449 case UDIV:
4450 case MOD:
4451 case UMOD:
4452 return true;
4453 default:
4454 return false;
4455 }
4456 }
4457
4458 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4459 It avoids exponential behavior in nonzero_bits1 when X has
4460 identical subexpressions on the first or the second level. */
4461
4462 static unsigned HOST_WIDE_INT
4463 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4464 machine_mode known_mode,
4465 unsigned HOST_WIDE_INT known_ret)
4466 {
4467 if (x == known_x && mode == known_mode)
4468 return known_ret;
4469
4470 /* Try to find identical subexpressions. If found call
4471 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4472 precomputed value for the subexpression as KNOWN_RET. */
4473
4474 if (nonzero_bits_binary_arith_p (x))
4475 {
4476 rtx x0 = XEXP (x, 0);
4477 rtx x1 = XEXP (x, 1);
4478
4479 /* Check the first level. */
4480 if (x0 == x1)
4481 return nonzero_bits1 (x, mode, x0, mode,
4482 cached_nonzero_bits (x0, mode, known_x,
4483 known_mode, known_ret));
4484
4485 /* Check the second level. */
4486 if (nonzero_bits_binary_arith_p (x0)
4487 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4488 return nonzero_bits1 (x, mode, x1, mode,
4489 cached_nonzero_bits (x1, mode, known_x,
4490 known_mode, known_ret));
4491
4492 if (nonzero_bits_binary_arith_p (x1)
4493 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4494 return nonzero_bits1 (x, mode, x0, mode,
4495 cached_nonzero_bits (x0, mode, known_x,
4496 known_mode, known_ret));
4497 }
4498
4499 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4500 }
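
/* For example, given (plus:SI (reg:SI 100) (reg:SI 100)) with a shared
   operand rtx, X0 == X1, so the nonzero bits of that operand are computed
   once and passed down as KNOWN_X/KNOWN_RET; without this, nonzero_bits1
   would recompute them for each occurrence, and deeply shared trees would
   take exponential time.  */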
4501
4502 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4503 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4504 is less useful. We can't allow both, because that results in exponential
4505 run time recursion. There is a nullstone testcase that triggered
4506 this. This macro avoids accidental uses of num_sign_bit_copies. */
4507 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4508
4509 /* Given an expression, X, compute which bits in X can be nonzero.
4510 We don't care about bits outside of those defined in MODE.
4511
4512 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4513 an arithmetic operation, we can do better. */
4514
4515 static unsigned HOST_WIDE_INT
4516 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4517 machine_mode known_mode,
4518 unsigned HOST_WIDE_INT known_ret)
4519 {
4520 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4521 unsigned HOST_WIDE_INT inner_nz;
4522 enum rtx_code code = GET_CODE (x);
4523 machine_mode inner_mode;
4524 unsigned int inner_width;
4525 scalar_int_mode xmode;
4526
4527 unsigned int mode_width = GET_MODE_PRECISION (mode);
4528
4529 if (CONST_INT_P (x))
4530 {
4531 if (SHORT_IMMEDIATES_SIGN_EXTEND
4532 && INTVAL (x) > 0
4533 && mode_width < BITS_PER_WORD
4534 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4535 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4536
4537 return UINTVAL (x);
4538 }
4539
4540 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4541 return nonzero;
4542 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4543
4544 /* If X is wider than MODE, use its mode instead. */
4545 if (xmode_width > mode_width)
4546 {
4547 mode = xmode;
4548 nonzero = GET_MODE_MASK (mode);
4549 mode_width = xmode_width;
4550 }
4551
4552 if (mode_width > HOST_BITS_PER_WIDE_INT)
4553 /* Our only callers in this case look for single bit values. So
4554 just return the mode mask. Those tests will then be false. */
4555 return nonzero;
4556
4557 /* If MODE is wider than X, but both are a single word for both the host
4558 and target machines, we can compute this from which bits of the object
4559 might be nonzero in its own mode, taking into account the fact that, on
4560 CISC machines, accessing an object in a wider mode generally causes the
4561 high-order bits to become undefined, so they are not known to be zero.
4562 We extend this reasoning to RISC machines for operations that might not
4563 operate on the full registers. */
4564 if (mode_width > xmode_width
4565 && xmode_width <= BITS_PER_WORD
4566 && xmode_width <= HOST_BITS_PER_WIDE_INT
4567 && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4568 {
4569 nonzero &= cached_nonzero_bits (x, xmode,
4570 known_x, known_mode, known_ret);
4571 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4572 return nonzero;
4573 }
4574
4575 /* Please keep nonzero_bits_binary_arith_p above in sync with
4576 the code in the switch below. */
4577 switch (code)
4578 {
4579 case REG:
4580 #if defined(POINTERS_EXTEND_UNSIGNED)
4581 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4582 all the bits above ptr_mode are known to be zero. */
4583 /* As we do not know which address space the pointer is referring to,
4584 we can do this only if the target does not support different pointer
4585 or address modes depending on the address space. */
4586 if (target_default_pointer_address_modes_p ()
4587 && POINTERS_EXTEND_UNSIGNED
4588 && xmode == Pmode
4589 && REG_POINTER (x)
4590 && !targetm.have_ptr_extend ())
4591 nonzero &= GET_MODE_MASK (ptr_mode);
4592 #endif
4593
4594 /* Include declared information about alignment of pointers. */
4595 /* ??? We don't properly preserve REG_POINTER changes across
4596 pointer-to-integer casts, so we can't trust it except for
4597 things that we know must be pointers. See execute/960116-1.c. */
4598 if ((x == stack_pointer_rtx
4599 || x == frame_pointer_rtx
4600 || x == arg_pointer_rtx)
4601 && REGNO_POINTER_ALIGN (REGNO (x)))
4602 {
4603 unsigned HOST_WIDE_INT alignment
4604 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4605
4606 #ifdef PUSH_ROUNDING
4607 /* If PUSH_ROUNDING is defined, it is possible for the
4608 stack to be momentarily aligned only to that amount,
4609 so we pick the least alignment. */
4610 if (x == stack_pointer_rtx && PUSH_ARGS)
4611 {
4612 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4613 alignment = MIN (known_alignment (rounded_1), alignment);
4614 }
4615 #endif
4616
4617 nonzero &= ~(alignment - 1);
4618 }
4619
4620 {
4621 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4622 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4623 &nonzero_for_hook);
4624
4625 if (new_rtx)
4626 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4627 known_mode, known_ret);
4628
4629 return nonzero_for_hook;
4630 }
4631
4632 case MEM:
4633 /* In many, if not most, RISC machines, reading a byte from memory
4634 zeros the rest of the register. Noticing that fact saves a lot
4635 of extra zero-extends. */
4636 if (load_extend_op (xmode) == ZERO_EXTEND)
4637 nonzero &= GET_MODE_MASK (xmode);
4638 break;
4639
4640 case EQ: case NE:
4641 case UNEQ: case LTGT:
4642 case GT: case GTU: case UNGT:
4643 case LT: case LTU: case UNLT:
4644 case GE: case GEU: case UNGE:
4645 case LE: case LEU: case UNLE:
4646 case UNORDERED: case ORDERED:
4647 /* If this produces an integer result, we know which bits are set.
4648 Code here used to clear bits outside the mode of X, but that is
4649 now done above. */
4650 /* Mind that MODE is the mode the caller wants to look at this
4651 operation in, and not the actual operation mode. We can wind
4652 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4653 that describes the results of a vector compare. */
4654 if (GET_MODE_CLASS (xmode) == MODE_INT
4655 && mode_width <= HOST_BITS_PER_WIDE_INT)
4656 nonzero = STORE_FLAG_VALUE;
4657 break;
4658
4659 case NEG:
4660 #if 0
4661 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4662 and num_sign_bit_copies. */
4663 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4664 nonzero = 1;
4665 #endif
4666
4667 if (xmode_width < mode_width)
4668 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4669 break;
4670
4671 case ABS:
4672 #if 0
4673 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4674 and num_sign_bit_copies. */
4675 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4676 nonzero = 1;
4677 #endif
4678 break;
4679
4680 case TRUNCATE:
4681 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4682 known_x, known_mode, known_ret)
4683 & GET_MODE_MASK (mode));
4684 break;
4685
4686 case ZERO_EXTEND:
4687 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4688 known_x, known_mode, known_ret);
4689 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4690 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4691 break;
4692
4693 case SIGN_EXTEND:
4694 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4695 Otherwise, show all the bits in the outer mode but not the inner
4696 may be nonzero. */
4697 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4698 known_x, known_mode, known_ret);
4699 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4700 {
4701 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4702 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4703 inner_nz |= (GET_MODE_MASK (mode)
4704 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4705 }
4706
4707 nonzero &= inner_nz;
4708 break;
4709
4710 case AND:
4711 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4712 known_x, known_mode, known_ret)
4713 & cached_nonzero_bits (XEXP (x, 1), mode,
4714 known_x, known_mode, known_ret);
4715 break;
4716
4717 case XOR: case IOR:
4718 case UMIN: case UMAX: case SMIN: case SMAX:
4719 {
4720 unsigned HOST_WIDE_INT nonzero0
4721 = cached_nonzero_bits (XEXP (x, 0), mode,
4722 known_x, known_mode, known_ret);
4723
4724 /* Don't call nonzero_bits for the second time if it cannot change
4725 anything. */
4726 if ((nonzero & nonzero0) != nonzero)
4727 nonzero &= nonzero0
4728 | cached_nonzero_bits (XEXP (x, 1), mode,
4729 known_x, known_mode, known_ret);
4730 }
4731 break;
4732
4733 case PLUS: case MINUS:
4734 case MULT:
4735 case DIV: case UDIV:
4736 case MOD: case UMOD:
4737 /* We can apply the rules of arithmetic to compute the number of
4738 high- and low-order zero bits of these operations. We start by
4739 computing the width (position of the highest-order nonzero bit)
4740 and the number of low-order zero bits for each value. */
4741 {
4742 unsigned HOST_WIDE_INT nz0
4743 = cached_nonzero_bits (XEXP (x, 0), mode,
4744 known_x, known_mode, known_ret);
4745 unsigned HOST_WIDE_INT nz1
4746 = cached_nonzero_bits (XEXP (x, 1), mode,
4747 known_x, known_mode, known_ret);
4748 int sign_index = xmode_width - 1;
4749 int width0 = floor_log2 (nz0) + 1;
4750 int width1 = floor_log2 (nz1) + 1;
4751 int low0 = ctz_or_zero (nz0);
4752 int low1 = ctz_or_zero (nz1);
4753 unsigned HOST_WIDE_INT op0_maybe_minusp
4754 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4755 unsigned HOST_WIDE_INT op1_maybe_minusp
4756 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4757 unsigned int result_width = mode_width;
4758 int result_low = 0;
4759
4760 switch (code)
4761 {
4762 case PLUS:
4763 result_width = MAX (width0, width1) + 1;
4764 result_low = MIN (low0, low1);
4765 break;
4766 case MINUS:
4767 result_low = MIN (low0, low1);
4768 break;
4769 case MULT:
4770 result_width = width0 + width1;
4771 result_low = low0 + low1;
4772 break;
4773 case DIV:
4774 if (width1 == 0)
4775 break;
4776 if (!op0_maybe_minusp && !op1_maybe_minusp)
4777 result_width = width0;
4778 break;
4779 case UDIV:
4780 if (width1 == 0)
4781 break;
4782 result_width = width0;
4783 break;
4784 case MOD:
4785 if (width1 == 0)
4786 break;
4787 if (!op0_maybe_minusp && !op1_maybe_minusp)
4788 result_width = MIN (width0, width1);
4789 result_low = MIN (low0, low1);
4790 break;
4791 case UMOD:
4792 if (width1 == 0)
4793 break;
4794 result_width = MIN (width0, width1);
4795 result_low = MIN (low0, low1);
4796 break;
4797 default:
4798 gcc_unreachable ();
4799 }
4800
4801 if (result_width < mode_width)
4802 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4803
4804 if (result_low > 0)
4805 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4806 }
4807 break;
4808
4809 case ZERO_EXTRACT:
4810 if (CONST_INT_P (XEXP (x, 1))
4811 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4812 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4813 break;
4814
4815 case SUBREG:
4816 /* If this is a SUBREG formed for a promoted variable that has
4817 been zero-extended, we know that at least the high-order bits
4818 are zero, though others might be too. */
4819 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4820 nonzero = GET_MODE_MASK (xmode)
4821 & cached_nonzero_bits (SUBREG_REG (x), xmode,
4822 known_x, known_mode, known_ret);
4823
4824 /* If the inner mode is a single word for both the host and target
4825 machines, we can compute this from which bits of the inner
4826 object might be nonzero. */
4827 inner_mode = GET_MODE (SUBREG_REG (x));
4828 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4829 && inner_width <= BITS_PER_WORD
4830 && inner_width <= HOST_BITS_PER_WIDE_INT)
4831 {
4832 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4833 known_x, known_mode, known_ret);
4834
4835 /* On a typical CISC machine, accessing an object in a wider mode
4836 causes the high-order bits to become undefined. So they are
4837 not known to be zero.
4838
4839 On a typical RISC machine, we only have to worry about the way
4840 loads are extended. Otherwise, if we get a reload for the inner
4841 part, it may be loaded from the stack, and then we may lose all
4842 the zero bits that existed before the store to the stack. */
4843 rtx_code extend_op;
4844 if ((!WORD_REGISTER_OPERATIONS
4845 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4846 ? val_signbit_known_set_p (inner_mode, nonzero)
4847 : extend_op != ZERO_EXTEND)
4848 || !MEM_P (SUBREG_REG (x)))
4849 && xmode_width > inner_width)
4850 nonzero
4851 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4852 }
4853 break;
4854
4855 case ASHIFT:
4856 case ASHIFTRT:
4857 case LSHIFTRT:
4858 case ROTATE:
4859 case ROTATERT:
4860 /* The nonzero bits are in two classes: any bits within MODE
4861 that aren't in xmode are always significant. The rest of the
4862 nonzero bits are those that are significant in the operand of
4863 the shift when shifted the appropriate number of bits. This
4864 shows that high-order bits are cleared by the right shift and
4865 low-order bits by left shifts. */
4866 if (CONST_INT_P (XEXP (x, 1))
4867 && INTVAL (XEXP (x, 1)) >= 0
4868 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4869 && INTVAL (XEXP (x, 1)) < xmode_width)
4870 {
4871 int count = INTVAL (XEXP (x, 1));
4872 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4873 unsigned HOST_WIDE_INT op_nonzero
4874 = cached_nonzero_bits (XEXP (x, 0), mode,
4875 known_x, known_mode, known_ret);
4876 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4877 unsigned HOST_WIDE_INT outer = 0;
4878
4879 if (mode_width > xmode_width)
4880 outer = (op_nonzero & nonzero & ~mode_mask);
4881
4882 switch (code)
4883 {
4884 case ASHIFT:
4885 inner <<= count;
4886 break;
4887
4888 case LSHIFTRT:
4889 inner >>= count;
4890 break;
4891
4892 case ASHIFTRT:
4893 inner >>= count;
4894
4895 /* If the sign bit may have been nonzero before the shift, we
4896 need to mark all the places it could have been copied to
4897 by the shift as possibly nonzero. */
4898 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4899 inner |= (((HOST_WIDE_INT_1U << count) - 1)
4900 << (xmode_width - count));
4901 break;
4902
4903 case ROTATE:
4904 inner = (inner << (count % xmode_width)
4905 | (inner >> (xmode_width - (count % xmode_width))))
4906 & mode_mask;
4907 break;
4908
4909 case ROTATERT:
4910 inner = (inner >> (count % xmode_width)
4911 | (inner << (xmode_width - (count % xmode_width))))
4912 & mode_mask;
4913 break;
4914
4915 default:
4916 gcc_unreachable ();
4917 }
4918
4919 nonzero &= (outer | inner);
4920 }
4921 break;
4922
4923 case FFS:
4924 case POPCOUNT:
4925 /* This is at most the number of bits in the mode. */
4926 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4927 break;
4928
4929 case CLZ:
4930 /* If CLZ has a known value at zero, then the nonzero bits are
4931 that value, plus the number of bits in the mode minus one. */
4932 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4933 nonzero
4934 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4935 else
4936 nonzero = -1;
4937 break;
4938
4939 case CTZ:
4940 /* If CTZ has a known value at zero, then the nonzero bits are
4941 that value, plus the number of bits in the mode minus one. */
4942 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4943 nonzero
4944 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4945 else
4946 nonzero = -1;
4947 break;
4948
4949 case CLRSB:
4950 /* This is at most the number of bits in the mode minus 1. */
4951 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4952 break;
4953
4954 case PARITY:
4955 nonzero = 1;
4956 break;
4957
4958 case IF_THEN_ELSE:
4959 {
4960 unsigned HOST_WIDE_INT nonzero_true
4961 = cached_nonzero_bits (XEXP (x, 1), mode,
4962 known_x, known_mode, known_ret);
4963
4964 /* Don't call nonzero_bits for the second time if it cannot change
4965 anything. */
4966 if ((nonzero & nonzero_true) != nonzero)
4967 nonzero &= nonzero_true
4968 | cached_nonzero_bits (XEXP (x, 2), mode,
4969 known_x, known_mode, known_ret);
4970 }
4971 break;
4972
4973 default:
4974 break;
4975 }
4976
4977 return nonzero;
4978 }
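
/* A worked instance of the PLUS rule above (illustrative values): if the
   operands have nonzero bits 0xf0 and 0x70, then width0 = 8, width1 = 7,
   low0 = low1 = 4, so result_width = 9 and result_low = 4, and the sum is
   known to fit in the mask 0x1f0.  */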
4979
4980 /* See the macro definition above. */
4981 #undef cached_num_sign_bit_copies
4982
4983 \f
4984 /* Return true if num_sign_bit_copies1 might recurse into both operands
4985 of X. */
4986
4987 static inline bool
4988 num_sign_bit_copies_binary_arith_p (const_rtx x)
4989 {
4990 if (!ARITHMETIC_P (x))
4991 return false;
4992 switch (GET_CODE (x))
4993 {
4994 case IOR:
4995 case AND:
4996 case XOR:
4997 case SMIN:
4998 case SMAX:
4999 case UMIN:
5000 case UMAX:
5001 case PLUS:
5002 case MINUS:
5003 case MULT:
5004 return true;
5005 default:
5006 return false;
5007 }
5008 }
5009
5010 /* The function cached_num_sign_bit_copies is a wrapper around
5011 num_sign_bit_copies1. It avoids exponential behavior in
5012 num_sign_bit_copies1 when X has identical subexpressions on the
5013 first or the second level. */
5014
5015 static unsigned int
5016 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
5017 const_rtx known_x, machine_mode known_mode,
5018 unsigned int known_ret)
5019 {
5020 if (x == known_x && mode == known_mode)
5021 return known_ret;
5022
5023 /* Try to find identical subexpressions. If found call
5024 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
5025 the precomputed value for the subexpression as KNOWN_RET. */
5026
5027 if (num_sign_bit_copies_binary_arith_p (x))
5028 {
5029 rtx x0 = XEXP (x, 0);
5030 rtx x1 = XEXP (x, 1);
5031
5032 /* Check the first level. */
5033 if (x0 == x1)
5034 return
5035 num_sign_bit_copies1 (x, mode, x0, mode,
5036 cached_num_sign_bit_copies (x0, mode, known_x,
5037 known_mode,
5038 known_ret));
5039
5040 /* Check the second level. */
5041 if (num_sign_bit_copies_binary_arith_p (x0)
5042 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
5043 return
5044 num_sign_bit_copies1 (x, mode, x1, mode,
5045 cached_num_sign_bit_copies (x1, mode, known_x,
5046 known_mode,
5047 known_ret));
5048
5049 if (num_sign_bit_copies_binary_arith_p (x1)
5050 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5051 return
5052 num_sign_bit_copies1 (x, mode, x0, mode,
5053 cached_num_sign_bit_copies (x0, mode, known_x,
5054 known_mode,
5055 known_ret));
5056 }
5057
5058 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5059 }
5060
5061 /* Return the number of bits at the high-order end of X that are known to
5062 be equal to the sign bit. X will be used in mode MODE. The returned
5063 value will always be between 1 and the number of bits in MODE. */
5064
5065 static unsigned int
5066 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5067 machine_mode known_mode,
5068 unsigned int known_ret)
5069 {
5070 enum rtx_code code = GET_CODE (x);
5071 unsigned int bitwidth = GET_MODE_PRECISION (mode);
5072 int num0, num1, result;
5073 unsigned HOST_WIDE_INT nonzero;
5074
5075 if (CONST_INT_P (x))
5076 {
5077 /* If the constant is negative, take its 1's complement and remask.
5078 Then see how many zero bits we have. */
5079 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5080 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5081 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5082 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5083
5084 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5085 }
5086
5087 scalar_int_mode xmode, inner_mode;
5088 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5089 return 1;
5090
5091 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5092
5093 /* For a smaller mode, just ignore the high bits. */
5094 if (bitwidth < xmode_width)
5095 {
5096 num0 = cached_num_sign_bit_copies (x, xmode,
5097 known_x, known_mode, known_ret);
5098 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5099 }
5100
5101 if (bitwidth > xmode_width)
5102 {
5103 /* If this machine does not do all register operations on the entire
5104 register and MODE is wider than the mode of X, we can say nothing
5105 at all about the high-order bits. We extend this reasoning to RISC
5106 machines for operations that might not operate on full registers. */
5107 if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5108 return 1;
5109
5110 /* Likewise on machines that do, if the mode of the object is smaller
5111 than a word and loads of that size don't sign extend, we can say
5112 nothing about the high order bits. */
5113 if (xmode_width < BITS_PER_WORD
5114 && load_extend_op (xmode) != SIGN_EXTEND)
5115 return 1;
5116 }
5117
5118 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5119 the code in the switch below. */
5120 switch (code)
5121 {
5122 case REG:
5123
5124 #if defined(POINTERS_EXTEND_UNSIGNED)
5125 /* If pointers extend signed and this is a pointer in Pmode, say that
5126 all the bits above ptr_mode are known to be sign bit copies. */
5127 /* As we do not know which address space the pointer is referring to,
5128 we can do this only if the target does not support different pointer
5129 or address modes depending on the address space. */
5130 if (target_default_pointer_address_modes_p ()
5131 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5132 && mode == Pmode && REG_POINTER (x)
5133 && !targetm.have_ptr_extend ())
5134 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5135 #endif
5136
5137 {
5138 unsigned int copies_for_hook = 1, copies = 1;
5139 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5140 &copies_for_hook);
5141
5142 if (new_rtx)
5143 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5144 known_mode, known_ret);
5145
5146 if (copies > 1 || copies_for_hook > 1)
5147 return MAX (copies, copies_for_hook);
5148
5149 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5150 }
5151 break;
5152
5153 case MEM:
5154 /* Some RISC machines sign-extend all loads of smaller than a word. */
5155 if (load_extend_op (xmode) == SIGN_EXTEND)
5156 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5157 break;
5158
5159 case SUBREG:
5160 /* If this is a SUBREG for a promoted object that is sign-extended
5161 and we are looking at it in a wider mode, we know that at least the
5162 high-order bits are known to be sign bit copies. */
5163
5164 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5165 {
5166 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5167 known_x, known_mode, known_ret);
5168 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5169 }
5170
5171 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5172 {
5173 /* For a smaller object, just ignore the high bits. */
5174 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5175 {
5176 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5177 known_x, known_mode,
5178 known_ret);
5179 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5180 - bitwidth));
5181 }
5182
5183 /* For paradoxical SUBREGs on machines where all register operations
5184 affect the entire register, just look inside. Note that we are
5185 passing MODE to the recursive call, so the number of sign bit
5186 copies will remain relative to that mode, not the inner mode.
5187
5188 This works only if loads sign extend. Otherwise, if we get a
5189 reload for the inner part, it may be loaded from the stack, and
5190 then we lose all sign bit copies that existed before the store
5191 to the stack. */
5192 if (WORD_REGISTER_OPERATIONS
5193 && load_extend_op (inner_mode) == SIGN_EXTEND
5194 && paradoxical_subreg_p (x)
5195 && MEM_P (SUBREG_REG (x)))
5196 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5197 known_x, known_mode, known_ret);
5198 }
5199 break;
5200
5201 case SIGN_EXTRACT:
5202 if (CONST_INT_P (XEXP (x, 1)))
5203 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5204 break;
5205
5206 case SIGN_EXTEND:
5207 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5208 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5209 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5210 known_x, known_mode, known_ret));
5211 break;
5212
5213 case TRUNCATE:
5214 /* For a smaller object, just ignore the high bits. */
5215 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5216 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5217 known_x, known_mode, known_ret);
5218 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5219 - bitwidth)));
5220
5221 case NOT:
5222 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5223 known_x, known_mode, known_ret);
5224
5225 case ROTATE: case ROTATERT:
5226 /* If we are rotating left by a number of bits less than the number
5227 of sign bit copies, we can just subtract that amount from the
5228 number. */
5229 if (CONST_INT_P (XEXP (x, 1))
5230 && INTVAL (XEXP (x, 1)) >= 0
5231 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5232 {
5233 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5234 known_x, known_mode, known_ret);
5235 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5236 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5237 }
5238 break;
5239
5240 case NEG:
5241 /* In general, this subtracts one sign bit copy. But if the value
5242 is known to be positive, the number of sign bit copies is the
5243 same as that of the input. Finally, if the input has just one bit
5244 that might be nonzero, all the bits are copies of the sign bit. */
5245 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5246 known_x, known_mode, known_ret);
5247 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5248 return num0 > 1 ? num0 - 1 : 1;
5249
5250 nonzero = nonzero_bits (XEXP (x, 0), mode);
5251 if (nonzero == 1)
5252 return bitwidth;
5253
5254 if (num0 > 1
5255 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5256 num0--;
5257
5258 return num0;
5259
5260 case IOR: case AND: case XOR:
5261 case SMIN: case SMAX: case UMIN: case UMAX:
5262 /* Logical operations will preserve the number of sign-bit copies.
5263 MIN and MAX operations always return one of the operands. */
5264 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5265 known_x, known_mode, known_ret);
5266 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5267 known_x, known_mode, known_ret);
5268
5269 /* If num1 is clearing some of the top bits then regardless of
5270 the other term, we are guaranteed to have at least that many
5271 high-order zero bits. */
5272 if (code == AND
5273 && num1 > 1
5274 && bitwidth <= HOST_BITS_PER_WIDE_INT
5275 && CONST_INT_P (XEXP (x, 1))
5276 && (UINTVAL (XEXP (x, 1))
5277 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5278 return num1;
5279
5280 /* Similarly for IOR when setting high-order bits. */
5281 if (code == IOR
5282 && num1 > 1
5283 && bitwidth <= HOST_BITS_PER_WIDE_INT
5284 && CONST_INT_P (XEXP (x, 1))
5285 && (UINTVAL (XEXP (x, 1))
5286 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5287 return num1;
5288
5289 return MIN (num0, num1);
5290
5291 case PLUS: case MINUS:
5292 /* For addition and subtraction, we can have a 1-bit carry. However,
5293 if we are subtracting 1 from a positive number, there will not
5294 be such a carry. Furthermore, if the positive number is known to
5295 be 0 or 1, we know the result is either -1 or 0. */
5296
5297 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5298 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5299 {
5300 nonzero = nonzero_bits (XEXP (x, 0), mode);
5301 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5302 return (nonzero == 1 || nonzero == 0 ? bitwidth
5303 : bitwidth - floor_log2 (nonzero) - 1);
5304 }
5305
5306 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5307 known_x, known_mode, known_ret);
5308 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5309 known_x, known_mode, known_ret);
5310 result = MAX (1, MIN (num0, num1) - 1);
5311
5312 return result;
5313
5314 case MULT:
5315 /* The number of bits of the product is the sum of the number of
5316 bits of both terms. However, unless one of the terms is known
5317 to be positive, we must allow for an additional bit since negating
5318 a negative number can remove one sign bit copy. */
5319
5320 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5321 known_x, known_mode, known_ret);
5322 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5323 known_x, known_mode, known_ret);
5324
5325 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5326 if (result > 0
5327 && (bitwidth > HOST_BITS_PER_WIDE_INT
5328 || (((nonzero_bits (XEXP (x, 0), mode)
5329 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5330 && ((nonzero_bits (XEXP (x, 1), mode)
5331 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5332 != 0))))
5333 result--;
5334
5335 return MAX (1, result);
5336
5337 case UDIV:
5338 /* The result must be <= the first operand. If the first operand
5339 has the high bit set, we know nothing about the number of sign
5340 bit copies. */
5341 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5342 return 1;
5343 else if ((nonzero_bits (XEXP (x, 0), mode)
5344 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5345 return 1;
5346 else
5347 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5348 known_x, known_mode, known_ret);
5349
5350 case UMOD:
5351 /* The result must be <= the second operand. If the second operand
5352 has (or just might have) the high bit set, we know nothing about
5353 the number of sign bit copies. */
5354 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5355 return 1;
5356 else if ((nonzero_bits (XEXP (x, 1), mode)
5357 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5358 return 1;
5359 else
5360 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5361 known_x, known_mode, known_ret);
5362
5363 case DIV:
5364 /* Similar to unsigned division, except that we have to worry about
5365 the case where the divisor is negative, in which case we have
5366 to add 1. */
5367 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5368 known_x, known_mode, known_ret);
5369 if (result > 1
5370 && (bitwidth > HOST_BITS_PER_WIDE_INT
5371 || (nonzero_bits (XEXP (x, 1), mode)
5372 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5373 result--;
5374
5375 return result;
5376
5377 case MOD:
5378 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5379 known_x, known_mode, known_ret);
5380 if (result > 1
5381 && (bitwidth > HOST_BITS_PER_WIDE_INT
5382 || (nonzero_bits (XEXP (x, 1), mode)
5383 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5384 result--;
5385
5386 return result;
5387
5388 case ASHIFTRT:
5389 /* Shifts by a constant add to the number of bits equal to the
5390 sign bit. */
5391 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5392 known_x, known_mode, known_ret);
5393 if (CONST_INT_P (XEXP (x, 1))
5394 && INTVAL (XEXP (x, 1)) > 0
5395 && INTVAL (XEXP (x, 1)) < xmode_width)
5396 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5397
5398 return num0;
5399
5400 case ASHIFT:
5401 /* Left shifts destroy copies. */
5402 if (!CONST_INT_P (XEXP (x, 1))
5403 || INTVAL (XEXP (x, 1)) < 0
5404 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5405 || INTVAL (XEXP (x, 1)) >= xmode_width)
5406 return 1;
5407
5408 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5409 known_x, known_mode, known_ret);
5410 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5411
5412 case IF_THEN_ELSE:
5413 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5414 known_x, known_mode, known_ret);
5415 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5416 known_x, known_mode, known_ret);
5417 return MIN (num0, num1);
5418
5419 case EQ: case NE: case GE: case GT: case LE: case LT:
5420 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5421 case GEU: case GTU: case LEU: case LTU:
5422 case UNORDERED: case ORDERED:
5423 /* If the constant is negative, take its 1's complement and remask.
5424 Then see how many zero bits we have. */
5425 nonzero = STORE_FLAG_VALUE;
5426 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5427 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5428 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5429
5430 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5431
5432 default:
5433 break;
5434 }
5435
5436 /* If we haven't been able to figure it out by one of the above rules,
5437 see if some of the high-order bits are known to be zero. If so,
5438 count those bits and return one less than that amount. If we can't
5439 safely compute the mask for this mode, always return BITWIDTH. */
5440
5441 bitwidth = GET_MODE_PRECISION (mode);
5442 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5443 return 1;
5444
5445 nonzero = nonzero_bits (x, mode);
5446 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5447 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5448 }
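
/* A worked instance of the SIGN_EXTEND rule above (assuming a typical
   target where SImode is 32 bits and DImode is 64): for
   (sign_extend:DI (reg:SI R)) examined in DImode, the result is
   64 - 32 + the number of sign bit copies of the SImode reg, so at least
   33 of the 64 bits are known to equal the sign bit.  */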
5449
5450 /* Calculate the rtx_cost of a single instruction pattern. A return value of
5451 zero indicates an instruction pattern without a known cost. */
5452
5453 int
5454 pattern_cost (rtx pat, bool speed)
5455 {
5456 int i, cost;
5457 rtx set;
5458
5459 /* Extract the single set rtx from the instruction pattern. We
5460 can't use single_set since we only have the pattern. We also
5461 consider PARALLELs of a normal set and a single comparison. In
5462 that case we use the cost of the non-comparison SET operation,
5463 which is most likely to be the real cost of this operation. */
5464 if (GET_CODE (pat) == SET)
5465 set = pat;
5466 else if (GET_CODE (pat) == PARALLEL)
5467 {
5468 set = NULL_RTX;
5469 rtx comparison = NULL_RTX;
5470
5471 for (i = 0; i < XVECLEN (pat, 0); i++)
5472 {
5473 rtx x = XVECEXP (pat, 0, i);
5474 if (GET_CODE (x) == SET)
5475 {
5476 if (GET_CODE (SET_SRC (x)) == COMPARE)
5477 {
5478 if (comparison)
5479 return 0;
5480 comparison = x;
5481 }
5482 else
5483 {
5484 if (set)
5485 return 0;
5486 set = x;
5487 }
5488 }
5489 }
5490
5491 if (!set && comparison)
5492 set = comparison;
5493
5494 if (!set)
5495 return 0;
5496 }
5497 else
5498 return 0;
5499
5500 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5501 return cost > 0 ? cost : COSTS_N_INSNS (1);
5502 }
5503
5504 /* Calculate the cost of a single instruction. A return value of zero
5505 indicates an instruction pattern without a known cost. */
5506
5507 int
5508 insn_cost (rtx_insn *insn, bool speed)
5509 {
5510 if (targetm.insn_cost)
5511 return targetm.insn_cost (insn, speed);
5512
5513 return pattern_cost (PATTERN (insn), speed);
5514 }
5515
5516 /* Return an estimate of the cost of computing SEQ. */
5517
5518 unsigned
5519 seq_cost (const rtx_insn *seq, bool speed)
5520 {
5521 unsigned cost = 0;
5522 rtx set;
5523
5524 for (; seq; seq = NEXT_INSN (seq))
5525 {
5526 set = single_set (seq);
5527 if (set)
5528 cost += set_rtx_cost (set, speed);
5529 else if (NONDEBUG_INSN_P (seq))
5530 {
5531 int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5532 if (this_cost > 0)
5533 cost += this_cost;
5534 else
5535 cost++;
5536 }
5537 }
5538
5539 return cost;
5540 }
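
/* A typical usage sketch (the surrounding expansion code and MAX_COST are
   assumptions, not shown here): generate a candidate sequence, cost it,
   and emit it only if it is cheap enough.

       start_sequence ();
       ... emit candidate insns ...
       rtx_insn *seq = get_insns ();
       end_sequence ();
       if (seq_cost (seq, optimize_insn_for_speed_p ()) < max_cost)
         emit_insn (seq);

   Callers differ in how they obtain MAX_COST; this is only a sketch.  */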
5541
5542 /* Given an insn INSN and condition COND, return the condition in a
5543 canonical form to simplify testing by callers. Specifically:
5544
5545 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5546 (2) Both operands will be machine operands; (cc0) will have been replaced.
5547 (3) If an operand is a constant, it will be the second operand.
5548 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5549 for GE, GEU, and LEU.
5550
5551 If the condition cannot be understood, or is an inequality floating-point
5552 comparison which needs to be reversed, 0 will be returned.
5553
5554 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5555
5556 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5557 insn used in locating the condition was found. If a replacement test
5558 of the condition is desired, it should be placed in front of that
5559 insn and we will be sure that the inputs are still valid.
5560
5561 If WANT_REG is nonzero, we wish the condition to be relative to that
5562 register, if possible. Therefore, do not canonicalize the condition
5563 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5564 to be a compare to a CC mode register.
5565
5566 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5567 and INSN. */
5568
5569 rtx
5570 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5571 rtx_insn **earliest,
5572 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5573 {
5574 enum rtx_code code;
5575 rtx_insn *prev = insn;
5576 const_rtx set;
5577 rtx tem;
5578 rtx op0, op1;
5579 int reverse_code = 0;
5580 machine_mode mode;
5581 basic_block bb = BLOCK_FOR_INSN (insn);
5582
5583 code = GET_CODE (cond);
5584 mode = GET_MODE (cond);
5585 op0 = XEXP (cond, 0);
5586 op1 = XEXP (cond, 1);
5587
5588 if (reverse)
5589 code = reversed_comparison_code (cond, insn);
5590 if (code == UNKNOWN)
5591 return 0;
5592
5593 if (earliest)
5594 *earliest = insn;
5595
5596 /* If we are comparing a register with zero, see if the register is set
5597 in the previous insn to a COMPARE or a comparison operation. Perform
5598 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5599 in cse.c. */
5600
5601 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5602 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5603 && op1 == CONST0_RTX (GET_MODE (op0))
5604 && op0 != want_reg)
5605 {
5606 /* Set nonzero when we find something of interest. */
5607 rtx x = 0;
5608
5609 /* If comparison with cc0, import actual comparison from compare
5610 insn. */
5611 if (op0 == cc0_rtx)
5612 {
5613 if ((prev = prev_nonnote_insn (prev)) == 0
5614 || !NONJUMP_INSN_P (prev)
5615 || (set = single_set (prev)) == 0
5616 || SET_DEST (set) != cc0_rtx)
5617 return 0;
5618
5619 op0 = SET_SRC (set);
5620 op1 = CONST0_RTX (GET_MODE (op0));
5621 if (earliest)
5622 *earliest = prev;
5623 }
5624
5625 /* If this is a COMPARE, pick up the two things being compared. */
5626 if (GET_CODE (op0) == COMPARE)
5627 {
5628 op1 = XEXP (op0, 1);
5629 op0 = XEXP (op0, 0);
5630 continue;
5631 }
5632 else if (!REG_P (op0))
5633 break;
5634
5635 /* Go back to the previous insn. Stop if it is not an INSN. We also
5636 stop if it isn't a single set or if it has a REG_INC note because
5637 we don't want to bother dealing with it. */
5638
5639 prev = prev_nonnote_nondebug_insn (prev);
5640
5641 if (prev == 0
5642 || !NONJUMP_INSN_P (prev)
5643 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5644 /* In cfglayout mode, there do not have to be labels at the
5645 beginning of a block, or jumps at the end, so the previous
5646 conditions would not stop us when we reach the bb boundary. */
5647 || BLOCK_FOR_INSN (prev) != bb)
5648 break;
5649
5650 set = set_of (op0, prev);
5651
5652 if (set
5653 && (GET_CODE (set) != SET
5654 || !rtx_equal_p (SET_DEST (set), op0)))
5655 break;
5656
5657 /* If this is setting OP0, get what it sets it to if it looks
5658 relevant. */
5659 if (set)
5660 {
5661 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5662 #ifdef FLOAT_STORE_FLAG_VALUE
5663 REAL_VALUE_TYPE fsfv;
5664 #endif
5665
5666 /* ??? We may not combine comparisons done in a CCmode with
5667 comparisons not done in a CCmode. This is to aid targets
5668 like Alpha that have an IEEE compliant EQ instruction, and
5669 a non-IEEE compliant BEQ instruction. The use of CCmode is
5670 actually artificial, simply to prevent the combination, but
5671 should not affect other platforms.
5672
5673 However, we must allow VOIDmode comparisons to match either
5674 CCmode or non-CCmode comparison, because some ports have
5675 modeless comparisons inside branch patterns.
5676
5677 ??? This mode check should perhaps look more like the mode check
5678 in simplify_comparison in combine. */
5679 if (((GET_MODE_CLASS (mode) == MODE_CC)
5680 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5681 && mode != VOIDmode
5682 && inner_mode != VOIDmode)
5683 break;
5684 if (GET_CODE (SET_SRC (set)) == COMPARE
5685 || (((code == NE
5686 || (code == LT
5687 && val_signbit_known_set_p (inner_mode,
5688 STORE_FLAG_VALUE))
5689 #ifdef FLOAT_STORE_FLAG_VALUE
5690 || (code == LT
5691 && SCALAR_FLOAT_MODE_P (inner_mode)
5692 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5693 REAL_VALUE_NEGATIVE (fsfv)))
5694 #endif
5695 ))
5696 && COMPARISON_P (SET_SRC (set))))
5697 x = SET_SRC (set);
5698 else if (((code == EQ
5699 || (code == GE
5700 && val_signbit_known_set_p (inner_mode,
5701 STORE_FLAG_VALUE))
5702 #ifdef FLOAT_STORE_FLAG_VALUE
5703 || (code == GE
5704 && SCALAR_FLOAT_MODE_P (inner_mode)
5705 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5706 REAL_VALUE_NEGATIVE (fsfv)))
5707 #endif
5708 ))
5709 && COMPARISON_P (SET_SRC (set)))
5710 {
5711 reverse_code = 1;
5712 x = SET_SRC (set);
5713 }
5714 else if ((code == EQ || code == NE)
5715 && GET_CODE (SET_SRC (set)) == XOR)
5716 /* Handle sequences like:
5717
5718 (set op0 (xor X Y))
5719 ...(eq|ne op0 (const_int 0))...
5720
5721 in which case:
5722
5723 (eq op0 (const_int 0)) reduces to (eq X Y)
5724 (ne op0 (const_int 0)) reduces to (ne X Y)
5725
5726 This is the form used by MIPS16, for example. */
5727 x = SET_SRC (set);
5728 else
5729 break;
5730 }
5731
5732 else if (reg_set_p (op0, prev))
5733 /* If this sets OP0, but not directly, we have to give up. */
5734 break;
5735
5736 if (x)
5737 {
5738 /* If the caller is expecting the condition to be valid at INSN,
5739 make sure X doesn't change before INSN. */
5740 if (valid_at_insn_p)
5741 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5742 break;
5743 if (COMPARISON_P (x))
5744 code = GET_CODE (x);
5745 if (reverse_code)
5746 {
5747 code = reversed_comparison_code (x, prev);
5748 if (code == UNKNOWN)
5749 return 0;
5750 reverse_code = 0;
5751 }
5752
5753 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5754 if (earliest)
5755 *earliest = prev;
5756 }
5757 }
5758
5759 /* If constant is first, put it last. */
5760 if (CONSTANT_P (op0))
5761 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5762
5763 /* If OP0 is the result of a comparison, we weren't able to find what
5764 was really being compared, so fail. */
5765 if (!allow_cc_mode
5766 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5767 return 0;
5768
5769 /* Canonicalize any ordered comparison with an integer constant that
5770 involves equality into the corresponding strict comparison, provided
5771 we can do the computation in OP0's mode without overflow. */
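/* For example, (le X (const_int 3)) becomes (lt X (const_int 4)) and
   (geu X (const_int 1)) becomes (gtu X (const_int 0)), as long as the
   adjusted constant is still representable in OP0's mode.  */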
5772
5773 scalar_int_mode op0_mode;
5774 if (CONST_INT_P (op1)
5775 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5776 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5777 {
5778 HOST_WIDE_INT const_val = INTVAL (op1);
5779 unsigned HOST_WIDE_INT uconst_val = const_val;
5780 unsigned HOST_WIDE_INT max_val
5781 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5782
5783 switch (code)
5784 {
5785 case LE:
5786 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5787 code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5788 break;
5789
5790 /* When cross-compiling, const_val might be sign-extended from
5791 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5792 case GE:
5793 if ((const_val & max_val)
5794 != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5795 code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5796 break;
5797
5798 case LEU:
5799 if (uconst_val < max_val)
5800 code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5801 break;
5802
5803 case GEU:
5804 if (uconst_val != 0)
5805 code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5806 break;
5807
5808 default:
5809 break;
5810 }
5811 }
5812
5813 /* Never return CC0; return zero instead. */
5814 if (CC0_P (op0))
5815 return 0;
5816
5817 /* We promised to return a comparison. */
5818 rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5819 if (COMPARISON_P (ret))
5820 return ret;
5821 return 0;
5822 }
5823
5824 /* Given a jump insn JUMP, return the condition that will cause it to branch
5825 to its JUMP_LABEL. If the condition cannot be understood, or is an
5826 inequality floating-point comparison which needs to be reversed, 0 will
5827 be returned.
5828
5829 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5830 insn used in locating the condition was found. If a replacement test
5831 of the condition is desired, it should be placed in front of that
5832 insn and we will be sure that the inputs are still valid. If EARLIEST
5833 is null, the returned condition will be valid at INSN.
5834
5835 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5836 compare CC mode register.
5837
5838 VALID_AT_INSN_P is the same as for canonicalize_condition. */
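/* For example, if JUMP's pattern is
   (set (pc) (if_then_else (gt X (const_int 0)) (pc) (label_ref L)))
   then the branch to L is taken when the comparison is false, so the
   comparison is reversed (here GT becomes LE for an integer X) before
   being passed to canonicalize_condition.  */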
5839
5840 rtx
5841 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5842 int valid_at_insn_p)
5843 {
5844 rtx cond;
5845 int reverse;
5846 rtx set;
5847
5848 /* If this is not a standard conditional jump, we can't parse it. */
5849 if (!JUMP_P (jump)
5850 || ! any_condjump_p (jump))
5851 return 0;
5852 set = pc_set (jump);
5853
5854 cond = XEXP (SET_SRC (set), 0);
5855
5856 /* If this branches to JUMP_LABEL when the condition is false, reverse
5857 the condition. */
5858 reverse
5859 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5860 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5861
5862 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5863 allow_cc_mode, valid_at_insn_p);
5864 }
5865
5866 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5867 TARGET_MODE_REP_EXTENDED.
5868
5869 Note that we assume that the property of
5870 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5871 narrower than mode B. I.e., if A is a mode narrower than B then in
5872 order to be able to operate on it in mode B, mode A needs to
5873 satisfy the requirements set by the representation of mode B. */
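/* For instance, on a target such as 64-bit MIPS, where
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, the loop
   below records that the 32 bits of a DImode value outside SImode must
   be copies of the sign bit: num_sign_bit_copies_in_rep[DImode][SImode]
   becomes 32.  */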
5874
5875 static void
5876 init_num_sign_bit_copies_in_rep (void)
5877 {
5878 opt_scalar_int_mode in_mode_iter;
5879 scalar_int_mode mode;
5880
5881 FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5882 FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5883 {
5884 scalar_int_mode in_mode = in_mode_iter.require ();
5885 scalar_int_mode i;
5886
5887 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5888 extends to the next widest mode. */
5889 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5890 || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5891
5892 /* We are in in_mode. Count how many bits outside of mode
5893 have to be copies of the sign-bit. */
5894 FOR_EACH_MODE (i, mode, in_mode)
5895 {
5896 /* This must always exist (for the last iteration it will be
5897 IN_MODE). */
5898 scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5899
5900 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5901 /* We can only check sign-bit copies starting from the
5902 top-bit. In order to be able to check the bits we
5903 have already seen we pretend that subsequent bits
5904 have to be sign-bit copies too. */
5905 || num_sign_bit_copies_in_rep [in_mode][mode])
5906 num_sign_bit_copies_in_rep [in_mode][mode]
5907 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5908 }
5909 }
5910 }
5911
5912 /* Suppose that truncation from the machine mode of X to MODE is not a
5913 no-op. See if there is anything special about X so that we can
5914 assume it already contains a truncated value of MODE. */
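/* For example, if num_sign_bit_copies_in_rep[DImode][SImode] is 32,
   a DImode X is considered truncated to SImode only when
   num_sign_bit_copies reports at least 33 sign-bit copies, i.e. X is
   already a valid sign-extended SImode value.  */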
5915
5916 bool
5917 truncated_to_mode (machine_mode mode, const_rtx x)
5918 {
5919 /* This register has already been used in MODE without explicit
5920 truncation. */
5921 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5922 return true;
5923
5924 /* See if we already satisfy the requirements of MODE. If yes we
5925 can just switch to MODE. */
5926 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5927 && (num_sign_bit_copies (x, GET_MODE (x))
5928 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5929 return true;
5930
5931 return false;
5932 }
5933 \f
5934 /* Return true if RTX code CODE has a single sequence of zero or more
5935 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5936 entry in that case. */
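/* For example, PLUS has format "ee", so its entry records start 0 and
   count 2, whereas PARALLEL has format "E" (an rtvec), so this function
   returns false and init_rtlanal falls back to UCHAR_MAX.  */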
5937
5938 static bool
5939 setup_reg_subrtx_bounds (unsigned int code)
5940 {
5941 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5942 unsigned int i = 0;
5943 for (; format[i] != 'e'; ++i)
5944 {
5945 if (!format[i])
5946 /* No subrtxes. Leave start and count as 0. */
5947 return true;
5948 if (format[i] == 'E' || format[i] == 'V')
5949 return false;
5950 }
5951
5952 /* Record the sequence of 'e's. */
5953 rtx_all_subrtx_bounds[code].start = i;
5954 do
5955 ++i;
5956 while (format[i] == 'e');
5957 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5958 /* rtl-iter.h relies on this. */
5959 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5960
5961 for (; format[i]; ++i)
5962 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5963 return false;
5964
5965 return true;
5966 }
5967
5968 /* Initialize rtx_all_subrtx_bounds and rtx_nonconst_subrtx_bounds, then set up num_sign_bit_copies_in_rep. */
5969 void
5970 init_rtlanal (void)
5971 {
5972 int i;
5973 for (i = 0; i < NUM_RTX_CODE; i++)
5974 {
5975 if (!setup_reg_subrtx_bounds (i))
5976 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5977 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5978 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5979 }
5980
5981 init_num_sign_bit_copies_in_rep ();
5982 }
5983 \f
5984 /* Check whether this is a constant pool constant. */
5985 bool
5986 constant_pool_constant_p (rtx x)
5987 {
5988 x = avoid_constant_pool_reference (x);
5989 return CONST_DOUBLE_P (x);
5990 }
5991 \f
5992 /* If M is a bitmask that selects a field of low-order bits within an item but
5993 not the entire word, return the length of the field. Return -1 otherwise.
5994 M is used in machine mode MODE. */
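/* For example, low_bitmask_len (SImode, 0x3f) is 6, while
   low_bitmask_len (SImode, 0x38) is -1 because the field does not start
   at bit 0.  */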
5995
5996 int
5997 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5998 {
5999 if (mode != VOIDmode)
6000 {
6001 if (!HWI_COMPUTABLE_MODE_P (mode))
6002 return -1;
6003 m &= GET_MODE_MASK (mode);
6004 }
6005
6006 return exact_log2 (m + 1);
6007 }
6008
6009 /* Return the mode of MEM's address. */
6010
6011 scalar_int_mode
6012 get_address_mode (rtx mem)
6013 {
6014 machine_mode mode;
6015
6016 gcc_assert (MEM_P (mem));
6017 mode = GET_MODE (XEXP (mem, 0));
6018 if (mode != VOIDmode)
6019 return as_a <scalar_int_mode> (mode);
6020 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
6021 }
6022 \f
6023 /* Split up a CONST_DOUBLE or integer constant rtx
6024 into two rtx's for single words,
6025 storing in *FIRST the word that comes first in memory in the target
6026 and in *SECOND the other.
6027
6028 TODO: This function needs to be rewritten to work on any size
6029 integer. */
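/* For example, with 32-bit words and a 64-bit HOST_WIDE_INT, splitting
   (const_int 0x123456789) produces a low word of 0x23456789 and a high
   word of 1; whether the low or the high word ends up in *FIRST depends
   on WORDS_BIG_ENDIAN.  */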
6030
6031 void
6032 split_double (rtx value, rtx *first, rtx *second)
6033 {
6034 if (CONST_INT_P (value))
6035 {
6036 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
6037 {
6038 /* In this case the CONST_INT holds both target words.
6039 Extract the bits from it into two word-sized pieces.
6040 Sign extend each half to HOST_WIDE_INT. */
6041 unsigned HOST_WIDE_INT low, high;
6042 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
6043 unsigned bits_per_word = BITS_PER_WORD;
6044
6045 /* Set sign_bit to the most significant bit of a word. */
6046 sign_bit = 1;
6047 sign_bit <<= bits_per_word - 1;
6048
6049 /* Set mask so that all bits of the word are set. We could
6050 have used 1 << BITS_PER_WORD instead of basing the
6051 calculation on sign_bit. However, on machines where
6052 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
6053 compiler warning, even though the code would never be
6054 executed. */
6055 mask = sign_bit << 1;
6056 mask--;
6057
6058 /* Set sign_extend as any remaining bits. */
6059 sign_extend = ~mask;
6060
6061 /* Pick the lower word and sign-extend it. */
6062 low = INTVAL (value);
6063 low &= mask;
6064 if (low & sign_bit)
6065 low |= sign_extend;
6066
6067 /* Pick the higher word, shifted to the least significant
6068 bits, and sign-extend it. */
6069 high = INTVAL (value);
6070 high >>= bits_per_word - 1;
6071 high >>= 1;
6072 high &= mask;
6073 if (high & sign_bit)
6074 high |= sign_extend;
6075
6076 /* Store the words in the target machine order. */
6077 if (WORDS_BIG_ENDIAN)
6078 {
6079 *first = GEN_INT (high);
6080 *second = GEN_INT (low);
6081 }
6082 else
6083 {
6084 *first = GEN_INT (low);
6085 *second = GEN_INT (high);
6086 }
6087 }
6088 else
6089 {
6090 /* The rule for using CONST_INT for a wider mode
6091 is that we regard the value as signed.
6092 So sign-extend it. */
6093 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6094 if (WORDS_BIG_ENDIAN)
6095 {
6096 *first = high;
6097 *second = value;
6098 }
6099 else
6100 {
6101 *first = value;
6102 *second = high;
6103 }
6104 }
6105 }
6106 else if (GET_CODE (value) == CONST_WIDE_INT)
6107 {
6108 /* All of this is scary code and needs to be converted to
6109 properly work with any size integer. */
6110 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6111 if (WORDS_BIG_ENDIAN)
6112 {
6113 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6114 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6115 }
6116 else
6117 {
6118 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6119 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6120 }
6121 }
6122 else if (!CONST_DOUBLE_P (value))
6123 {
6124 if (WORDS_BIG_ENDIAN)
6125 {
6126 *first = const0_rtx;
6127 *second = value;
6128 }
6129 else
6130 {
6131 *first = value;
6132 *second = const0_rtx;
6133 }
6134 }
6135 else if (GET_MODE (value) == VOIDmode
6136 /* This is the old way we did CONST_DOUBLE integers. */
6137 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6138 {
6139 /* In an integer, the words are defined as most and least significant.
6140 So order them by the target's convention. */
6141 if (WORDS_BIG_ENDIAN)
6142 {
6143 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6144 *second = GEN_INT (CONST_DOUBLE_LOW (value));
6145 }
6146 else
6147 {
6148 *first = GEN_INT (CONST_DOUBLE_LOW (value));
6149 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6150 }
6151 }
6152 else
6153 {
6154 long l[2];
6155
6156 /* Note, this converts the REAL_VALUE_TYPE to the target's
6157 format, splits up the floating point double and outputs
6158 exactly 32 bits of it into each of l[0] and l[1] --
6159 not necessarily BITS_PER_WORD bits. */
6160 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6161
6162 /* If 32 bits is an entire word for the target, but not for the host,
6163 then sign-extend on the host so that the number will look the same
6164 way on the host that it would on the target. See for instance
6165 simplify_unary_operation. The #if is needed to avoid compiler
6166 warnings. */
6167
6168 #if HOST_BITS_PER_LONG > 32
6169 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6170 {
6171 if (l[0] & ((long) 1 << 31))
6172 l[0] |= ((unsigned long) (-1) << 32);
6173 if (l[1] & ((long) 1 << 31))
6174 l[1] |= ((unsigned long) (-1) << 32);
6175 }
6176 #endif
6177
6178 *first = GEN_INT (l[0]);
6179 *second = GEN_INT (l[1]);
6180 }
6181 }
6182
6183 /* Return true if X is a sign_extract or zero_extract from the least
6184 significant bit. */
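/* For example, (zero_extract:SI (reg:SI R) (const_int 8) (const_int 0))
   qualifies when !BITS_BIG_ENDIAN; with BITS_BIG_ENDIAN the position
   would have to be 24 for an 8-bit field of an SImode value.  */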
6185
6186 static bool
6187 lsb_bitfield_op_p (rtx x)
6188 {
6189 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6190 {
6191 machine_mode mode = GET_MODE (XEXP (x, 0));
6192 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6193 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6194 poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6195
6196 return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6197 }
6198 return false;
6199 }
6200
6201 /* Strip outer address "mutations" from LOC and return a pointer to the
6202 inner value. If OUTER_CODE is nonnull, store the code of the innermost
6203 stripped expression there.
6204
6205 "Mutations" either convert between modes or apply some kind of
6206 extension, truncation or alignment. */
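/* For example, given (and (plus (reg B) (reg I)) (const_int -16)),
   which aligns the sum to 16 bytes, this returns a pointer to the
   location of the PLUS and, if OUTER_CODE is nonnull, stores AND
   in it.  */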
6207
6208 rtx *
6209 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6210 {
6211 for (;;)
6212 {
6213 enum rtx_code code = GET_CODE (*loc);
6214 if (GET_RTX_CLASS (code) == RTX_UNARY)
6215 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6216 used to convert between pointer sizes. */
6217 loc = &XEXP (*loc, 0);
6218 else if (lsb_bitfield_op_p (*loc))
6219 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6220 acts as a combined truncation and extension. */
6221 loc = &XEXP (*loc, 0);
6222 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6223 /* (and ... (const_int -X)) is used to align to X bytes. */
6224 loc = &XEXP (*loc, 0);
6225 else if (code == SUBREG
6226 && !OBJECT_P (SUBREG_REG (*loc))
6227 && subreg_lowpart_p (*loc))
6228 /* A lowpart (subreg (operator ...) ...) inside an address is used
6229 for mode conversion too. */
6230 loc = &SUBREG_REG (*loc);
6231 else
6232 return loc;
6233 if (outer_code)
6234 *outer_code = code;
6235 }
6236 }
6237
6238 /* Return true if CODE applies some kind of scale. The scaled value is
6239 the first operand and the scale is the second. */
6240
6241 static bool
6242 binary_scale_code_p (enum rtx_code code)
6243 {
6244 return (code == MULT
6245 || code == ASHIFT
6246 /* Needed by ARM targets. */
6247 || code == ASHIFTRT
6248 || code == LSHIFTRT
6249 || code == ROTATE
6250 || code == ROTATERT);
6251 }
6252
6253 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6254 (see address_info). Return null otherwise. */
6255
6256 static rtx *
6257 get_base_term (rtx *inner)
6258 {
6259 if (GET_CODE (*inner) == LO_SUM)
6260 inner = strip_address_mutations (&XEXP (*inner, 0));
6261 if (REG_P (*inner)
6262 || MEM_P (*inner)
6263 || GET_CODE (*inner) == SUBREG
6264 || GET_CODE (*inner) == SCRATCH)
6265 return inner;
6266 return 0;
6267 }
6268
6269 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6270 (see address_info). Return null otherwise. */
6271
6272 static rtx *
6273 get_index_term (rtx *inner)
6274 {
6275 /* At present, only constant scales are allowed. */
6276 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6277 inner = strip_address_mutations (&XEXP (*inner, 0));
6278 if (REG_P (*inner)
6279 || MEM_P (*inner)
6280 || GET_CODE (*inner) == SUBREG
6281 || GET_CODE (*inner) == SCRATCH)
6282 return inner;
6283 return 0;
6284 }
6285
6286 /* Set the segment part of address INFO to LOC, given that INNER is the
6287 unmutated value. */
6288
6289 static void
6290 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6291 {
6292 gcc_assert (!info->segment);
6293 info->segment = loc;
6294 info->segment_term = inner;
6295 }
6296
6297 /* Set the base part of address INFO to LOC, given that INNER is the
6298 unmutated value. */
6299
6300 static void
6301 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6302 {
6303 gcc_assert (!info->base);
6304 info->base = loc;
6305 info->base_term = inner;
6306 }
6307
6308 /* Set the index part of address INFO to LOC, given that INNER is the
6309 unmutated value. */
6310
6311 static void
6312 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6313 {
6314 gcc_assert (!info->index);
6315 info->index = loc;
6316 info->index_term = inner;
6317 }
6318
6319 /* Set the displacement part of address INFO to LOC, given that INNER
6320 is the constant term. */
6321
6322 static void
6323 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6324 {
6325 gcc_assert (!info->disp);
6326 info->disp = loc;
6327 info->disp_term = inner;
6328 }
6329
6330 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6331 rest of INFO accordingly. */
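/* For example, for (post_inc (reg SP)) both BASE and BASE_TERM point at
   the SP reference; the size of the implicit step is given by the mode
   of the addressed value, which is why VOIDmode is rejected below.  */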
6332
6333 static void
6334 decompose_incdec_address (struct address_info *info)
6335 {
6336 info->autoinc_p = true;
6337
6338 rtx *base = &XEXP (*info->inner, 0);
6339 set_address_base (info, base, base);
6340 gcc_checking_assert (info->base == info->base_term);
6341
6342 /* These addresses are only valid when the size of the addressed
6343 value is known. */
6344 gcc_checking_assert (info->mode != VOIDmode);
6345 }
6346
6347 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6348 of INFO accordingly. */
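/* For example, for (pre_modify (reg SP) (plus (reg SP) (const_int -16)))
   the base and BASE_TERM2 both refer to SP and the constant step becomes
   the displacement; a register step would become the index instead.  */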
6349
6350 static void
6351 decompose_automod_address (struct address_info *info)
6352 {
6353 info->autoinc_p = true;
6354
6355 rtx *base = &XEXP (*info->inner, 0);
6356 set_address_base (info, base, base);
6357 gcc_checking_assert (info->base == info->base_term);
6358
6359 rtx plus = XEXP (*info->inner, 1);
6360 gcc_assert (GET_CODE (plus) == PLUS);
6361
6362 info->base_term2 = &XEXP (plus, 0);
6363 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6364
6365 rtx *step = &XEXP (plus, 1);
6366 rtx *inner_step = strip_address_mutations (step);
6367 if (CONSTANT_P (*inner_step))
6368 set_address_disp (info, step, inner_step);
6369 else
6370 set_address_index (info, step, inner_step);
6371 }
6372
6373 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6374 values in [PTR, END). Return a pointer to the end of the used array. */
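/* For example, (plus (plus (reg A) (reg B)) (const_int 4)) stores
   pointers to (reg A), (reg B) and (const_int 4), in that order.  */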
6375
6376 static rtx **
6377 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6378 {
6379 rtx x = *loc;
6380 if (GET_CODE (x) == PLUS)
6381 {
6382 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6383 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6384 }
6385 else
6386 {
6387 gcc_assert (ptr != end);
6388 *ptr++ = loc;
6389 }
6390 return ptr;
6391 }
6392
6393 /* Evaluate the likelihood of X being a base or index value, returning
6394 positive if it is likely to be a base, negative if it is likely to be
6395 an index, and 0 if we can't tell. Make the magnitude of the return
6396 value reflect the amount of confidence we have in the answer.
6397
6398 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6399
6400 static int
6401 baseness (rtx x, machine_mode mode, addr_space_t as,
6402 enum rtx_code outer_code, enum rtx_code index_code)
6403 {
6404 /* Believe *_POINTER unless the address shape requires otherwise. */
6405 if (REG_P (x) && REG_POINTER (x))
6406 return 2;
6407 if (MEM_P (x) && MEM_POINTER (x))
6408 return 2;
6409
6410 if (REG_P (x) && HARD_REGISTER_P (x))
6411 {
6412 /* X is a hard register. If it only fits one of the base
6413 or index classes, choose that interpretation. */
6414 int regno = REGNO (x);
6415 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6416 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6417 if (base_p != index_p)
6418 return base_p ? 1 : -1;
6419 }
6420 return 0;
6421 }
6422
6423 /* INFO->INNER describes a normal, non-automodified address.
6424 Fill in the rest of INFO accordingly. */
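/* For example, for an address such as
   (plus (plus (reg B) (mult (reg I) (const_int 4))) (const_int 8))
   the constant becomes the displacement, the MULT becomes the index
   (with (reg I) as the index term) and (reg B) becomes the base.  */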
6425
6426 static void
6427 decompose_normal_address (struct address_info *info)
6428 {
6429 /* Treat the address as the sum of up to four values. */
6430 rtx *ops[4];
6431 size_t n_ops = extract_plus_operands (info->inner, ops,
6432 ops + ARRAY_SIZE (ops)) - ops;
6433
6434 /* If there is more than one component, any base component is in a PLUS. */
6435 if (n_ops > 1)
6436 info->base_outer_code = PLUS;
6437
6438 /* Try to classify each sum operand now. Leave those that could be
6439 either a base or an index in OPS. */
6440 rtx *inner_ops[4];
6441 size_t out = 0;
6442 for (size_t in = 0; in < n_ops; ++in)
6443 {
6444 rtx *loc = ops[in];
6445 rtx *inner = strip_address_mutations (loc);
6446 if (CONSTANT_P (*inner))
6447 set_address_disp (info, loc, inner);
6448 else if (GET_CODE (*inner) == UNSPEC)
6449 set_address_segment (info, loc, inner);
6450 else
6451 {
6452 /* The only other possibilities are a base or an index. */
6453 rtx *base_term = get_base_term (inner);
6454 rtx *index_term = get_index_term (inner);
6455 gcc_assert (base_term || index_term);
6456 if (!base_term)
6457 set_address_index (info, loc, index_term);
6458 else if (!index_term)
6459 set_address_base (info, loc, base_term);
6460 else
6461 {
6462 gcc_assert (base_term == index_term);
6463 ops[out] = loc;
6464 inner_ops[out] = base_term;
6465 ++out;
6466 }
6467 }
6468 }
6469
6470 /* Classify the remaining OPS members as bases and indexes. */
6471 if (out == 1)
6472 {
6473 /* If we haven't seen a base or an index yet, assume that this is
6474 the base. If we were confident that another term was the base
6475 or index, treat the remaining operand as the other kind. */
6476 if (!info->base)
6477 set_address_base (info, ops[0], inner_ops[0]);
6478 else
6479 set_address_index (info, ops[0], inner_ops[0]);
6480 }
6481 else if (out == 2)
6482 {
6483 /* In the event of a tie, assume the base comes first. */
6484 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6485 GET_CODE (*ops[1]))
6486 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6487 GET_CODE (*ops[0])))
6488 {
6489 set_address_base (info, ops[0], inner_ops[0]);
6490 set_address_index (info, ops[1], inner_ops[1]);
6491 }
6492 else
6493 {
6494 set_address_base (info, ops[1], inner_ops[1]);
6495 set_address_index (info, ops[0], inner_ops[0]);
6496 }
6497 }
6498 else
6499 gcc_assert (out == 0);
6500 }
6501
6502 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6503 or VOIDmode if not known. AS is the address space associated with LOC.
6504 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6505
6506 void
6507 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6508 addr_space_t as, enum rtx_code outer_code)
6509 {
6510 memset (info, 0, sizeof (*info));
6511 info->mode = mode;
6512 info->as = as;
6513 info->addr_outer_code = outer_code;
6514 info->outer = loc;
6515 info->inner = strip_address_mutations (loc, &outer_code);
6516 info->base_outer_code = outer_code;
6517 switch (GET_CODE (*info->inner))
6518 {
6519 case PRE_DEC:
6520 case PRE_INC:
6521 case POST_DEC:
6522 case POST_INC:
6523 decompose_incdec_address (info);
6524 break;
6525
6526 case PRE_MODIFY:
6527 case POST_MODIFY:
6528 decompose_automod_address (info);
6529 break;
6530
6531 default:
6532 decompose_normal_address (info);
6533 break;
6534 }
6535 }
6536
6537 /* Describe address operand LOC in INFO. */
6538
6539 void
6540 decompose_lea_address (struct address_info *info, rtx *loc)
6541 {
6542 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6543 }
6544
6545 /* Describe the address of MEM X in INFO. */
6546
6547 void
6548 decompose_mem_address (struct address_info *info, rtx x)
6549 {
6550 gcc_assert (MEM_P (x));
6551 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6552 MEM_ADDR_SPACE (x), MEM);
6553 }
6554
6555 /* Update INFO after a change to the address it describes. */
6556
6557 void
6558 update_address (struct address_info *info)
6559 {
6560 decompose_address (info, info->outer, info->mode, info->as,
6561 info->addr_outer_code);
6562 }
6563
6564 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6565 more complicated than that. */
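/* For example, the scale is 4 for both (mult (reg I) (const_int 4)) and
   (ashift (reg I) (const_int 2)), and 1 when the index is a bare
   register.  */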
6566
6567 HOST_WIDE_INT
6568 get_index_scale (const struct address_info *info)
6569 {
6570 rtx index = *info->index;
6571 if (GET_CODE (index) == MULT
6572 && CONST_INT_P (XEXP (index, 1))
6573 && info->index_term == &XEXP (index, 0))
6574 return INTVAL (XEXP (index, 1));
6575
6576 if (GET_CODE (index) == ASHIFT
6577 && CONST_INT_P (XEXP (index, 1))
6578 && info->index_term == &XEXP (index, 0))
6579 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6580
6581 if (info->index == info->index_term)
6582 return 1;
6583
6584 return 0;
6585 }
6586
6587 /* Return the "index code" of INFO, in the form required by
6588 ok_for_base_p_1. */
6589
6590 enum rtx_code
6591 get_index_code (const struct address_info *info)
6592 {
6593 if (info->index)
6594 return GET_CODE (*info->index);
6595
6596 if (info->disp)
6597 return GET_CODE (*info->disp);
6598
6599 return SCRATCH;
6600 }
6601
6602 /* Return true if RTL X contains a SYMBOL_REF. */
6603
6604 bool
6605 contains_symbol_ref_p (const_rtx x)
6606 {
6607 subrtx_iterator::array_type array;
6608 FOR_EACH_SUBRTX (iter, array, x, ALL)
6609 if (SYMBOL_REF_P (*iter))
6610 return true;
6611
6612 return false;
6613 }
6614
6615 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6616
6617 bool
6618 contains_symbolic_reference_p (const_rtx x)
6619 {
6620 subrtx_iterator::array_type array;
6621 FOR_EACH_SUBRTX (iter, array, x, ALL)
6622 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6623 return true;
6624
6625 return false;
6626 }
6627
6628 /* Return true if RTL X contains a constant pool address. */
6629
6630 bool
6631 contains_constant_pool_address_p (const_rtx x)
6632 {
6633 subrtx_iterator::array_type array;
6634 FOR_EACH_SUBRTX (iter, array, x, ALL)
6635 if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
6636 return true;
6637
6638 return false;
6639 }
6640
6641
6642 /* Return true if X contains a thread-local symbol. */
6643
6644 bool
6645 tls_referenced_p (const_rtx x)
6646 {
6647 if (!targetm.have_tls)
6648 return false;
6649
6650 subrtx_iterator::array_type array;
6651 FOR_EACH_SUBRTX (iter, array, x, ALL)
6652 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6653 return true;
6654 return false;
6655 }
6656
6657 /* Return true if reg REGNO with mode REG_MODE would be clobbered by the
6658 clobber_high operand in CLOBBER_HIGH_OP. */
6659
6660 bool
6661 reg_is_clobbered_by_clobber_high (unsigned int regno, machine_mode reg_mode,
6662 const_rtx clobber_high_op)
6663 {
6664 unsigned int clobber_regno = REGNO (clobber_high_op);
6665 machine_mode clobber_mode = GET_MODE (clobber_high_op);
6666 unsigned char regno_nregs = hard_regno_nregs (regno, reg_mode);
6667
6668 /* Clobber high should always span exactly one register. */
6669 gcc_assert (REG_NREGS (clobber_high_op) == 1);
6670
6671 /* The clobber_high operand must clobber one of the registers spanned by REGNO. */
6672 if (clobber_regno < regno || clobber_regno >= regno + regno_nregs)
6673 return false;
6674
6675 gcc_assert (reg_mode != BLKmode && clobber_mode != BLKmode);
6676
6677 if (reg_mode == VOIDmode)
6678 return clobber_mode != VOIDmode;
6679
6680 /* The clobber_high only preserves the low CLOBBER_MODE bits, so REGNO is
6681 clobbered if the per-register size of REG_MODE might exceed the size of CLOBBER_MODE. */
6682 return maybe_gt (exact_div (GET_MODE_SIZE (reg_mode), regno_nregs),
6683 GET_MODE_SIZE (clobber_mode));
6684 }