Add more subreg offset helpers
gcc/rtlanal.c
1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38
39 /* Forward declarations */
40 static void set_of_1 (rtx, const_rtx, void *);
41 static bool covers_regno_p (const_rtx, unsigned int);
42 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
43 static int computed_jump_p_1 (const_rtx);
44 static void parms_set (rtx, const_rtx, void *);
45
46 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
47 const_rtx, machine_mode,
48 unsigned HOST_WIDE_INT);
49 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
50 const_rtx, machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
53 machine_mode,
54 unsigned int);
55 static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
56 machine_mode, unsigned int);
57
58 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
59 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
60
61 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
62 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
63 SIGN_EXTEND then while narrowing we also have to enforce the
64 representation and sign-extend the value to mode DESTINATION_REP.
65
66 If the value is already sign-extended to DESTINATION_REP mode we
67 can just switch to DESTINATION mode on it. For each pair of
68 integral modes SOURCE and DESTINATION, when truncating from SOURCE
69 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
70 contains the number of high-order bits in SOURCE that have to be
71 copies of the sign-bit so that we can do this mode-switch to
72 DESTINATION. */
73
74 static unsigned int
75 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
76 \f
77 /* Store X into index I of ARRAY. ARRAY is known to have at least I
78 elements. Return the new base of ARRAY. */
79
80 template <typename T>
81 typename T::value_type *
82 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
83 value_type *base,
84 size_t i, value_type x)
85 {
86 if (base == array.stack)
87 {
88 if (i < LOCAL_ELEMS)
89 {
90 base[i] = x;
91 return base;
92 }
93 gcc_checking_assert (i == LOCAL_ELEMS);
94 /* A previous iteration might also have moved from the stack to the
95 heap, in which case the heap array will already be big enough. */
96 if (vec_safe_length (array.heap) <= i)
97 vec_safe_grow (array.heap, i + 1);
98 base = array.heap->address ();
99 memcpy (base, array.stack, sizeof (array.stack));
100 base[LOCAL_ELEMS] = x;
101 return base;
102 }
103 unsigned int length = array.heap->length ();
104 if (length > i)
105 {
106 gcc_checking_assert (base == array.heap->address ());
107 base[i] = x;
108 return base;
109 }
110 else
111 {
112 gcc_checking_assert (i == length);
113 vec_safe_push (array.heap, x);
114 return array.heap->address ();
115 }
116 }
117
118 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
119 number of elements added to the worklist. */
120
121 template <typename T>
122 size_t
123 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
124 value_type *base,
125 size_t end, rtx_type x)
126 {
127 enum rtx_code code = GET_CODE (x);
128 const char *format = GET_RTX_FORMAT (code);
129 size_t orig_end = end;
130 if (__builtin_expect (INSN_P (x), false))
131 {
132 /* Put the pattern at the top of the queue, since that's what
133 we're likely to want most. It also allows for the SEQUENCE
134 code below. */
135 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
136 if (format[i] == 'e')
137 {
138 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
139 if (__builtin_expect (end < LOCAL_ELEMS, true))
140 base[end++] = subx;
141 else
142 base = add_single_to_queue (array, base, end++, subx);
143 }
144 }
145 else
146 for (int i = 0; format[i]; ++i)
147 if (format[i] == 'e')
148 {
149 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
150 if (__builtin_expect (end < LOCAL_ELEMS, true))
151 base[end++] = subx;
152 else
153 base = add_single_to_queue (array, base, end++, subx);
154 }
155 else if (format[i] == 'E')
156 {
157 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
158 rtx *vec = x->u.fld[i].rt_rtvec->elem;
159 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
160 for (unsigned int j = 0; j < length; j++)
161 base[end++] = T::get_value (vec[j]);
162 else
163 for (unsigned int j = 0; j < length; j++)
164 base = add_single_to_queue (array, base, end++,
165 T::get_value (vec[j]));
166 if (code == SEQUENCE && end == length)
167 /* If the subrtxes of the sequence fill the entire array then
168 we know that no other parts of a containing insn are queued.
169 The caller is therefore iterating over the sequence as a
170 PATTERN (...), so we also want the patterns of the
171 subinstructions. */
172 for (unsigned int j = 0; j < length; j++)
173 {
174 typename T::rtx_type x = T::get_rtx (base[j]);
175 if (INSN_P (x))
176 base[j] = T::get_value (PATTERN (x));
177 }
178 }
179 return end - orig_end;
180 }
181
182 template <typename T>
183 void
184 generic_subrtx_iterator <T>::free_array (array_type &array)
185 {
186 vec_free (array.heap);
187 }
188
189 template <typename T>
190 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
191
192 template class generic_subrtx_iterator <const_rtx_accessor>;
193 template class generic_subrtx_iterator <rtx_var_accessor>;
194 template class generic_subrtx_iterator <rtx_ptr_accessor>;
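/* As an illustrative sketch (the helper below is hypothetical, not part of
   this file): the iterator classes instantiated above are normally used
   through the FOR_EACH_SUBRTX* macros from rtl-iter.h, e.g. to walk every
   subrtx of X:

     static int
     count_regs (const_rtx x)
     {
       subrtx_iterator::array_type array;
       int n = 0;
       FOR_EACH_SUBRTX (iter, array, x, ALL)
         if (REG_P (*iter))
           n++;
       return n;
     }

   find_all_hard_regs later in this file is a real user of the same
   pattern.  */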
195
196 /* Return 1 if the value of X is unstable
197 (would be different at a different point in the program).
198 The frame pointer, arg pointer, etc. are considered stable
199 (within one function) and so is anything marked `unchanging'. */
200
201 int
202 rtx_unstable_p (const_rtx x)
203 {
204 const RTX_CODE code = GET_CODE (x);
205 int i;
206 const char *fmt;
207
208 switch (code)
209 {
210 case MEM:
211 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
212
213 case CONST:
214 CASE_CONST_ANY:
215 case SYMBOL_REF:
216 case LABEL_REF:
217 return 0;
218
219 case REG:
220 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
221 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
222 /* The arg pointer varies if it is not a fixed register. */
223 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
224 return 0;
225 /* ??? When call-clobbered, the value is stable modulo the restore
226 that must happen after a call. This currently screws up local-alloc
227 into believing that the restore is not needed. */
228 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
229 return 0;
230 return 1;
231
232 case ASM_OPERANDS:
233 if (MEM_VOLATILE_P (x))
234 return 1;
235
236 /* Fall through. */
237
238 default:
239 break;
240 }
241
242 fmt = GET_RTX_FORMAT (code);
243 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
244 if (fmt[i] == 'e')
245 {
246 if (rtx_unstable_p (XEXP (x, i)))
247 return 1;
248 }
249 else if (fmt[i] == 'E')
250 {
251 int j;
252 for (j = 0; j < XVECLEN (x, i); j++)
253 if (rtx_unstable_p (XVECEXP (x, i, j)))
254 return 1;
255 }
256
257 return 0;
258 }
259
260 /* Return 1 if X has a value that can vary even between two
261 executions of the program. 0 means X can be compared reliably
262 against certain constants or near-constants.
263 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
264 zero, we are slightly more conservative.
265 The frame pointer and the arg pointer are considered constant. */
266
267 bool
268 rtx_varies_p (const_rtx x, bool for_alias)
269 {
270 RTX_CODE code;
271 int i;
272 const char *fmt;
273
274 if (!x)
275 return 0;
276
277 code = GET_CODE (x);
278 switch (code)
279 {
280 case MEM:
281 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
282
283 case CONST:
284 CASE_CONST_ANY:
285 case SYMBOL_REF:
286 case LABEL_REF:
287 return 0;
288
289 case REG:
290 /* Note that we have to test for the actual rtx used for the frame
291 and arg pointers and not just the register number in case we have
292 eliminated the frame and/or arg pointer and are using it
293 for pseudos. */
294 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
295 /* The arg pointer varies if it is not a fixed register. */
296 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
297 return 0;
298 if (x == pic_offset_table_rtx
299 /* ??? When call-clobbered, the value is stable modulo the restore
300 that must happen after a call. This currently screws up
301 local-alloc into believing that the restore is not needed, so we
302 must return 0 only if we are called from alias analysis. */
303 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
304 return 0;
305 return 1;
306
307 case LO_SUM:
308 /* The operand 0 of a LO_SUM is considered constant
309 (in fact it is related specifically to operand 1)
310 during alias analysis. */
311 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
312 || rtx_varies_p (XEXP (x, 1), for_alias);
313
314 case ASM_OPERANDS:
315 if (MEM_VOLATILE_P (x))
316 return 1;
317
318 /* Fall through. */
319
320 default:
321 break;
322 }
323
324 fmt = GET_RTX_FORMAT (code);
325 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
326 if (fmt[i] == 'e')
327 {
328 if (rtx_varies_p (XEXP (x, i), for_alias))
329 return 1;
330 }
331 else if (fmt[i] == 'E')
332 {
333 int j;
334 for (j = 0; j < XVECLEN (x, i); j++)
335 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
336 return 1;
337 }
338
339 return 0;
340 }
341
342 /* Compute an approximation for the offset between the register
343 FROM and TO for the current function, as it was at the start
344 of the routine. */
345
346 static HOST_WIDE_INT
347 get_initial_register_offset (int from, int to)
348 {
349 static const struct elim_table_t
350 {
351 const int from;
352 const int to;
353 } table[] = ELIMINABLE_REGS;
354 HOST_WIDE_INT offset1, offset2;
355 unsigned int i, j;
356
357 if (to == from)
358 return 0;
359
360 /* It is not safe to call INITIAL_ELIMINATION_OFFSET
361 before the reload pass. We need to give at least
 362 an estimate of the resulting frame size. */
363 if (! reload_completed)
364 {
365 offset1 = crtl->outgoing_args_size + get_frame_size ();
366 #if !STACK_GROWS_DOWNWARD
367 offset1 = - offset1;
368 #endif
369 if (to == STACK_POINTER_REGNUM)
370 return offset1;
371 else if (from == STACK_POINTER_REGNUM)
372 return - offset1;
373 else
374 return 0;
375 }
376
377 for (i = 0; i < ARRAY_SIZE (table); i++)
378 if (table[i].from == from)
379 {
380 if (table[i].to == to)
381 {
382 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
383 offset1);
384 return offset1;
385 }
386 for (j = 0; j < ARRAY_SIZE (table); j++)
387 {
388 if (table[j].to == to
389 && table[j].from == table[i].to)
390 {
391 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
392 offset1);
393 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
394 offset2);
395 return offset1 + offset2;
396 }
397 if (table[j].from == to
398 && table[j].to == table[i].to)
399 {
400 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
401 offset1);
402 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
403 offset2);
404 return offset1 - offset2;
405 }
406 }
407 }
408 else if (table[i].to == from)
409 {
410 if (table[i].from == to)
411 {
412 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
413 offset1);
414 return - offset1;
415 }
416 for (j = 0; j < ARRAY_SIZE (table); j++)
417 {
418 if (table[j].to == to
419 && table[j].from == table[i].from)
420 {
421 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
422 offset1);
423 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
424 offset2);
425 return - offset1 + offset2;
426 }
427 if (table[j].from == to
428 && table[j].to == table[i].from)
429 {
430 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
431 offset1);
432 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
433 offset2);
434 return - offset1 - offset2;
435 }
436 }
437 }
438
439 /* If the requested register combination was not found,
 440 try a different, simpler combination. */
441 if (from == ARG_POINTER_REGNUM)
442 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
443 else if (to == ARG_POINTER_REGNUM)
444 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
445 else if (from == HARD_FRAME_POINTER_REGNUM)
446 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
447 else if (to == HARD_FRAME_POINTER_REGNUM)
448 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
449 else
450 return 0;
451 }
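/* A worked example of the composition above (the table entries and offsets
   are hypothetical, not taken from any particular target): suppose
   ELIMINABLE_REGS contains {ARG_POINTER_REGNUM, STACK_POINTER_REGNUM} and
   {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM} but no direct
   ARG_POINTER -> FRAME_POINTER entry.  Then

     get_initial_register_offset (ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM)

   is derived through the common STACK_POINTER_REGNUM "to" register as

     INITIAL_ELIMINATION_OFFSET (AP, SP)   e.g.  32
   - INITIAL_ELIMINATION_OFFSET (FP, SP)   e.g.  16
   = 16.  */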
452
453 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
454 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
455 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
456 references on strict alignment machines. */
457
458 static int
459 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
460 machine_mode mode, bool unaligned_mems)
461 {
462 enum rtx_code code = GET_CODE (x);
463
464 /* The offset must be a multiple of the mode size if we are considering
465 unaligned memory references on strict alignment machines. */
466 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
467 {
468 HOST_WIDE_INT actual_offset = offset;
469
470 #ifdef SPARC_STACK_BOUNDARY_HACK
471 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
472 the real alignment of %sp. However, when it does this, the
473 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
474 if (SPARC_STACK_BOUNDARY_HACK
475 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
476 actual_offset -= STACK_POINTER_OFFSET;
477 #endif
478
479 if (actual_offset % GET_MODE_SIZE (mode) != 0)
480 return 1;
481 }
482
483 switch (code)
484 {
485 case SYMBOL_REF:
486 if (SYMBOL_REF_WEAK (x))
487 return 1;
488 if (!CONSTANT_POOL_ADDRESS_P (x))
489 {
490 tree decl;
491 HOST_WIDE_INT decl_size;
492
493 if (offset < 0)
494 return 1;
495 if (size == 0)
496 size = GET_MODE_SIZE (mode);
497 if (size == 0)
498 return offset != 0;
499
500 /* If the size of the access or of the symbol is unknown,
501 assume the worst. */
502 decl = SYMBOL_REF_DECL (x);
503
504 /* Else check that the access is in bounds. TODO: restructure
505 expr_size/tree_expr_size/int_expr_size and just use the latter. */
506 if (!decl)
507 decl_size = -1;
508 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
509 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
510 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
511 : -1);
512 else if (TREE_CODE (decl) == STRING_CST)
513 decl_size = TREE_STRING_LENGTH (decl);
514 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
515 decl_size = int_size_in_bytes (TREE_TYPE (decl));
516 else
517 decl_size = -1;
518
519 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
520 }
521
522 return 0;
523
524 case LABEL_REF:
525 return 0;
526
527 case REG:
528 /* Stack references are assumed not to trap, but we need to deal with
529 nonsensical offsets. */
530 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
531 || x == stack_pointer_rtx
532 /* The arg pointer varies if it is not a fixed register. */
533 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
534 {
535 #ifdef RED_ZONE_SIZE
536 HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
537 #else
538 HOST_WIDE_INT red_zone_size = 0;
539 #endif
540 HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
541 / BITS_PER_UNIT;
542 HOST_WIDE_INT low_bound, high_bound;
543
544 if (size == 0)
545 size = GET_MODE_SIZE (mode);
546 if (size == 0)
547 return 1;
548
549 if (x == frame_pointer_rtx)
550 {
551 if (FRAME_GROWS_DOWNWARD)
552 {
553 high_bound = STARTING_FRAME_OFFSET;
554 low_bound = high_bound - get_frame_size ();
555 }
556 else
557 {
558 low_bound = STARTING_FRAME_OFFSET;
559 high_bound = low_bound + get_frame_size ();
560 }
561 }
562 else if (x == hard_frame_pointer_rtx)
563 {
564 HOST_WIDE_INT sp_offset
565 = get_initial_register_offset (STACK_POINTER_REGNUM,
566 HARD_FRAME_POINTER_REGNUM);
567 HOST_WIDE_INT ap_offset
568 = get_initial_register_offset (ARG_POINTER_REGNUM,
569 HARD_FRAME_POINTER_REGNUM);
570
571 #if STACK_GROWS_DOWNWARD
572 low_bound = sp_offset - red_zone_size - stack_boundary;
573 high_bound = ap_offset
574 + FIRST_PARM_OFFSET (current_function_decl)
575 #if !ARGS_GROW_DOWNWARD
576 + crtl->args.size
577 #endif
578 + stack_boundary;
579 #else
580 high_bound = sp_offset + red_zone_size + stack_boundary;
581 low_bound = ap_offset
582 + FIRST_PARM_OFFSET (current_function_decl)
583 #if ARGS_GROW_DOWNWARD
584 - crtl->args.size
585 #endif
586 - stack_boundary;
587 #endif
588 }
589 else if (x == stack_pointer_rtx)
590 {
591 HOST_WIDE_INT ap_offset
592 = get_initial_register_offset (ARG_POINTER_REGNUM,
593 STACK_POINTER_REGNUM);
594
595 #if STACK_GROWS_DOWNWARD
596 low_bound = - red_zone_size - stack_boundary;
597 high_bound = ap_offset
598 + FIRST_PARM_OFFSET (current_function_decl)
599 #if !ARGS_GROW_DOWNWARD
600 + crtl->args.size
601 #endif
602 + stack_boundary;
603 #else
604 high_bound = red_zone_size + stack_boundary;
605 low_bound = ap_offset
606 + FIRST_PARM_OFFSET (current_function_decl)
607 #if ARGS_GROW_DOWNWARD
608 - crtl->args.size
609 #endif
610 - stack_boundary;
611 #endif
612 }
613 else
614 {
615 /* We assume that accesses are safe to at least the
616 next stack boundary.
617 Examples are varargs and __builtin_return_address. */
618 #if ARGS_GROW_DOWNWARD
619 high_bound = FIRST_PARM_OFFSET (current_function_decl)
620 + stack_boundary;
621 low_bound = FIRST_PARM_OFFSET (current_function_decl)
622 - crtl->args.size - stack_boundary;
623 #else
624 low_bound = FIRST_PARM_OFFSET (current_function_decl)
625 - stack_boundary;
626 high_bound = FIRST_PARM_OFFSET (current_function_decl)
627 + crtl->args.size + stack_boundary;
628 #endif
629 }
630
631 if (offset >= low_bound && offset <= high_bound - size)
632 return 0;
633 return 1;
634 }
635 /* All of the virtual frame registers are stack references. */
636 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
637 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
638 return 0;
639 return 1;
640
641 case CONST:
642 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
643 mode, unaligned_mems);
644
645 case PLUS:
646 /* An address is assumed not to trap if:
647 - it is the pic register plus a constant. */
648 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
649 return 0;
650
651 /* - or it is an address that can't trap plus a constant integer. */
652 if (CONST_INT_P (XEXP (x, 1))
653 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
654 size, mode, unaligned_mems))
655 return 0;
656
657 return 1;
658
659 case LO_SUM:
660 case PRE_MODIFY:
661 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
662 mode, unaligned_mems);
663
664 case PRE_DEC:
665 case PRE_INC:
666 case POST_DEC:
667 case POST_INC:
668 case POST_MODIFY:
669 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
670 mode, unaligned_mems);
671
672 default:
673 break;
674 }
675
 676 /* If it isn't one of the cases above, it can cause a trap. */
677 return 1;
678 }
679
680 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
681
682 int
683 rtx_addr_can_trap_p (const_rtx x)
684 {
685 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
686 }
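/* Illustrative sketch (the address is hypothetical, built only for the
   example):

     rtx addr = gen_rtx_PLUS (Pmode,
                              gen_rtx_SYMBOL_REF (Pmode, "x"),
                              GEN_INT (4));
     bool may_trap = rtx_addr_can_trap_p (addr) != 0;

   Per rtx_addr_can_trap_p_1 above, a weak SYMBOL_REF, or a constant offset
   that falls outside the referenced object, makes the result nonzero.  */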
687
688 /* Return true if X is an address that is known to not be zero. */
689
690 bool
691 nonzero_address_p (const_rtx x)
692 {
693 const enum rtx_code code = GET_CODE (x);
694
695 switch (code)
696 {
697 case SYMBOL_REF:
698 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
699
700 case LABEL_REF:
701 return true;
702
703 case REG:
704 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
705 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
706 || x == stack_pointer_rtx
707 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
708 return true;
709 /* All of the virtual frame registers are stack references. */
710 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
711 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
712 return true;
713 return false;
714
715 case CONST:
716 return nonzero_address_p (XEXP (x, 0));
717
718 case PLUS:
719 /* Handle PIC references. */
720 if (XEXP (x, 0) == pic_offset_table_rtx
721 && CONSTANT_P (XEXP (x, 1)))
722 return true;
723 return false;
724
725 case PRE_MODIFY:
726 /* Similar to the above; allow positive offsets. Further, since
727 auto-inc is only allowed in memories, the register must be a
728 pointer. */
729 if (CONST_INT_P (XEXP (x, 1))
730 && INTVAL (XEXP (x, 1)) > 0)
731 return true;
732 return nonzero_address_p (XEXP (x, 0));
733
734 case PRE_INC:
735 /* Similarly. Further, the offset is always positive. */
736 return true;
737
738 case PRE_DEC:
739 case POST_DEC:
740 case POST_INC:
741 case POST_MODIFY:
742 return nonzero_address_p (XEXP (x, 0));
743
744 case LO_SUM:
745 return nonzero_address_p (XEXP (x, 1));
746
747 default:
748 break;
749 }
750
 751 /* If it isn't one of the cases above, it might be zero. */
752 return false;
753 }
754
755 /* Return 1 if X refers to a memory location whose address
756 cannot be compared reliably with constant addresses,
757 or if X refers to a BLKmode memory object.
758 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
759 zero, we are slightly more conservative. */
760
761 bool
762 rtx_addr_varies_p (const_rtx x, bool for_alias)
763 {
764 enum rtx_code code;
765 int i;
766 const char *fmt;
767
768 if (x == 0)
769 return 0;
770
771 code = GET_CODE (x);
772 if (code == MEM)
773 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
774
775 fmt = GET_RTX_FORMAT (code);
776 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
777 if (fmt[i] == 'e')
778 {
779 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
780 return 1;
781 }
782 else if (fmt[i] == 'E')
783 {
784 int j;
785 for (j = 0; j < XVECLEN (x, i); j++)
786 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
787 return 1;
788 }
789 return 0;
790 }
791 \f
792 /* Return the CALL in X if there is one. */
793
794 rtx
795 get_call_rtx_from (rtx x)
796 {
797 if (INSN_P (x))
798 x = PATTERN (x);
799 if (GET_CODE (x) == PARALLEL)
800 x = XVECEXP (x, 0, 0);
801 if (GET_CODE (x) == SET)
802 x = SET_SRC (x);
803 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
804 return x;
805 return NULL_RTX;
806 }
807 \f
808 /* Return the value of the integer term in X, if one is apparent;
809 otherwise return 0.
810 Only obvious integer terms are detected.
811 This is used in cse.c with the `related_value' field. */
812
813 HOST_WIDE_INT
814 get_integer_term (const_rtx x)
815 {
816 if (GET_CODE (x) == CONST)
817 x = XEXP (x, 0);
818
819 if (GET_CODE (x) == MINUS
820 && CONST_INT_P (XEXP (x, 1)))
821 return - INTVAL (XEXP (x, 1));
822 if (GET_CODE (x) == PLUS
823 && CONST_INT_P (XEXP (x, 1)))
824 return INTVAL (XEXP (x, 1));
825 return 0;
826 }
827
828 /* If X is a constant, return the value sans apparent integer term;
829 otherwise return 0.
830 Only obvious integer terms are detected. */
831
832 rtx
833 get_related_value (const_rtx x)
834 {
835 if (GET_CODE (x) != CONST)
836 return 0;
837 x = XEXP (x, 0);
838 if (GET_CODE (x) == PLUS
839 && CONST_INT_P (XEXP (x, 1)))
840 return XEXP (x, 0);
841 else if (GET_CODE (x) == MINUS
842 && CONST_INT_P (XEXP (x, 1)))
843 return XEXP (x, 0);
844 return 0;
845 }
846 \f
847 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
848 to somewhere in the same object or object_block as SYMBOL. */
849
850 bool
851 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
852 {
853 tree decl;
854
855 if (GET_CODE (symbol) != SYMBOL_REF)
856 return false;
857
858 if (offset == 0)
859 return true;
860
861 if (offset > 0)
862 {
863 if (CONSTANT_POOL_ADDRESS_P (symbol)
864 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
865 return true;
866
867 decl = SYMBOL_REF_DECL (symbol);
868 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
869 return true;
870 }
871
872 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
873 && SYMBOL_REF_BLOCK (symbol)
874 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
875 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
876 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
877 return true;
878
879 return false;
880 }
881
882 /* Split X into a base and a constant offset, storing them in *BASE_OUT
883 and *OFFSET_OUT respectively. */
884
885 void
886 split_const (rtx x, rtx *base_out, rtx *offset_out)
887 {
888 if (GET_CODE (x) == CONST)
889 {
890 x = XEXP (x, 0);
891 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
892 {
893 *base_out = XEXP (x, 0);
894 *offset_out = XEXP (x, 1);
895 return;
896 }
897 }
898 *base_out = x;
899 *offset_out = const0_rtx;
900 }
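/* Illustrative sketch of a caller (the address built here is hypothetical,
   constructed only for the example):

     rtx addr = gen_rtx_CONST (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             gen_rtx_SYMBOL_REF (Pmode, "x"),
                                             GEN_INT (12)));
     rtx base, offset;
     split_const (addr, &base, &offset);

   Afterwards BASE is the SYMBOL_REF and OFFSET is (const_int 12); for an
   rtx that is not of the form (const (plus ... (const_int ...))), BASE is
   the rtx itself and OFFSET is const0_rtx.  */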
901 \f
902 /* Return the number of places FIND appears within X. If COUNT_DEST is
903 zero, we do not count occurrences inside the destination of a SET. */
904
905 int
906 count_occurrences (const_rtx x, const_rtx find, int count_dest)
907 {
908 int i, j;
909 enum rtx_code code;
910 const char *format_ptr;
911 int count;
912
913 if (x == find)
914 return 1;
915
916 code = GET_CODE (x);
917
918 switch (code)
919 {
920 case REG:
921 CASE_CONST_ANY:
922 case SYMBOL_REF:
923 case CODE_LABEL:
924 case PC:
925 case CC0:
926 return 0;
927
928 case EXPR_LIST:
929 count = count_occurrences (XEXP (x, 0), find, count_dest);
930 if (XEXP (x, 1))
931 count += count_occurrences (XEXP (x, 1), find, count_dest);
932 return count;
933
934 case MEM:
935 if (MEM_P (find) && rtx_equal_p (x, find))
936 return 1;
937 break;
938
939 case SET:
940 if (SET_DEST (x) == find && ! count_dest)
941 return count_occurrences (SET_SRC (x), find, count_dest);
942 break;
943
944 default:
945 break;
946 }
947
948 format_ptr = GET_RTX_FORMAT (code);
949 count = 0;
950
951 for (i = 0; i < GET_RTX_LENGTH (code); i++)
952 {
953 switch (*format_ptr++)
954 {
955 case 'e':
956 count += count_occurrences (XEXP (x, i), find, count_dest);
957 break;
958
959 case 'E':
960 for (j = 0; j < XVECLEN (x, i); j++)
961 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
962 break;
963 }
964 }
965 return count;
966 }
967
968 \f
969 /* Return TRUE if OP is a register or subreg of a register that
970 holds an unsigned quantity. Otherwise, return FALSE. */
971
972 bool
973 unsigned_reg_p (rtx op)
974 {
975 if (REG_P (op)
976 && REG_EXPR (op)
977 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
978 return true;
979
980 if (GET_CODE (op) == SUBREG
981 && SUBREG_PROMOTED_SIGN (op))
982 return true;
983
984 return false;
985 }
986
987 \f
988 /* Nonzero if register REG appears somewhere within IN.
989 Also works if REG is not a register; in this case it checks
990 for a subexpression of IN that is Lisp "equal" to REG. */
991
992 int
993 reg_mentioned_p (const_rtx reg, const_rtx in)
994 {
995 const char *fmt;
996 int i;
997 enum rtx_code code;
998
999 if (in == 0)
1000 return 0;
1001
1002 if (reg == in)
1003 return 1;
1004
1005 if (GET_CODE (in) == LABEL_REF)
1006 return reg == label_ref_label (in);
1007
1008 code = GET_CODE (in);
1009
1010 switch (code)
1011 {
1012 /* Compare registers by number. */
1013 case REG:
1014 return REG_P (reg) && REGNO (in) == REGNO (reg);
1015
1016 /* These codes have no constituent expressions
1017 and are unique. */
1018 case SCRATCH:
1019 case CC0:
1020 case PC:
1021 return 0;
1022
1023 CASE_CONST_ANY:
1024 /* These are kept unique for a given value. */
1025 return 0;
1026
1027 default:
1028 break;
1029 }
1030
1031 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1032 return 1;
1033
1034 fmt = GET_RTX_FORMAT (code);
1035
1036 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1037 {
1038 if (fmt[i] == 'E')
1039 {
1040 int j;
1041 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1042 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1043 return 1;
1044 }
1045 else if (fmt[i] == 'e'
1046 && reg_mentioned_p (reg, XEXP (in, i)))
1047 return 1;
1048 }
1049 return 0;
1050 }
1051 \f
1052 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1053 no CODE_LABEL insn. */
1054
1055 int
1056 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1057 {
1058 rtx_insn *p;
1059 if (beg == end)
1060 return 0;
1061 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1062 if (LABEL_P (p))
1063 return 0;
1064 return 1;
1065 }
1066
1067 /* Nonzero if register REG is used in an insn between
1068 FROM_INSN and TO_INSN (exclusive of those two). */
1069
1070 int
1071 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1072 const rtx_insn *to_insn)
1073 {
1074 rtx_insn *insn;
1075
1076 if (from_insn == to_insn)
1077 return 0;
1078
1079 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1080 if (NONDEBUG_INSN_P (insn)
1081 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1082 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1083 return 1;
1084 return 0;
1085 }
1086 \f
1087 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1088 is entirely replaced by a new value and the only use is as a SET_DEST,
1089 we do not consider it a reference. */
1090
1091 int
1092 reg_referenced_p (const_rtx x, const_rtx body)
1093 {
1094 int i;
1095
1096 switch (GET_CODE (body))
1097 {
1098 case SET:
1099 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1100 return 1;
1101
1102 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1103 of a REG that occupies all of the REG, the insn references X if
1104 it is mentioned in the destination. */
1105 if (GET_CODE (SET_DEST (body)) != CC0
1106 && GET_CODE (SET_DEST (body)) != PC
1107 && !REG_P (SET_DEST (body))
1108 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1109 && REG_P (SUBREG_REG (SET_DEST (body)))
1110 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
1111 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
1112 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
1113 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
1114 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1115 return 1;
1116 return 0;
1117
1118 case ASM_OPERANDS:
1119 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1120 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1121 return 1;
1122 return 0;
1123
1124 case CALL:
1125 case USE:
1126 case IF_THEN_ELSE:
1127 return reg_overlap_mentioned_p (x, body);
1128
1129 case TRAP_IF:
1130 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1131
1132 case PREFETCH:
1133 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1134
1135 case UNSPEC:
1136 case UNSPEC_VOLATILE:
1137 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1138 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1139 return 1;
1140 return 0;
1141
1142 case PARALLEL:
1143 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1144 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1145 return 1;
1146 return 0;
1147
1148 case CLOBBER:
1149 if (MEM_P (XEXP (body, 0)))
1150 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1151 return 1;
1152 return 0;
1153
1154 case COND_EXEC:
1155 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1156 return 1;
1157 return reg_referenced_p (x, COND_EXEC_CODE (body));
1158
1159 default:
1160 return 0;
1161 }
1162 }
1163 \f
1164 /* Nonzero if register REG is set or clobbered in an insn between
1165 FROM_INSN and TO_INSN (exclusive of those two). */
1166
1167 int
1168 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1169 const rtx_insn *to_insn)
1170 {
1171 const rtx_insn *insn;
1172
1173 if (from_insn == to_insn)
1174 return 0;
1175
1176 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1177 if (INSN_P (insn) && reg_set_p (reg, insn))
1178 return 1;
1179 return 0;
1180 }
1181
1182 /* Return true if REG is set or clobbered inside INSN. */
1183
1184 int
1185 reg_set_p (const_rtx reg, const_rtx insn)
1186 {
1187 /* After delay slot handling, call and branch insns might be in a
1188 sequence. Check all the elements there. */
1189 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1190 {
1191 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1192 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1193 return true;
1194
1195 return false;
1196 }
1197
1198 /* We can be passed an insn or part of one. If we are passed an insn,
1199 check if a side-effect of the insn clobbers REG. */
1200 if (INSN_P (insn)
1201 && (FIND_REG_INC_NOTE (insn, reg)
1202 || (CALL_P (insn)
1203 && ((REG_P (reg)
1204 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1205 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1206 GET_MODE (reg), REGNO (reg)))
1207 || MEM_P (reg)
1208 || find_reg_fusage (insn, CLOBBER, reg)))))
1209 return true;
1210
1211 return set_of (reg, insn) != NULL_RTX;
1212 }
1213
1214 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1215 only if none of them are modified between START and END. Return 1 if
1216 X contains a MEM; this routine does use memory aliasing. */
1217
1218 int
1219 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1220 {
1221 const enum rtx_code code = GET_CODE (x);
1222 const char *fmt;
1223 int i, j;
1224 rtx_insn *insn;
1225
1226 if (start == end)
1227 return 0;
1228
1229 switch (code)
1230 {
1231 CASE_CONST_ANY:
1232 case CONST:
1233 case SYMBOL_REF:
1234 case LABEL_REF:
1235 return 0;
1236
1237 case PC:
1238 case CC0:
1239 return 1;
1240
1241 case MEM:
1242 if (modified_between_p (XEXP (x, 0), start, end))
1243 return 1;
1244 if (MEM_READONLY_P (x))
1245 return 0;
1246 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1247 if (memory_modified_in_insn_p (x, insn))
1248 return 1;
1249 return 0;
1250
1251 case REG:
1252 return reg_set_between_p (x, start, end);
1253
1254 default:
1255 break;
1256 }
1257
1258 fmt = GET_RTX_FORMAT (code);
1259 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1260 {
1261 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1262 return 1;
1263
1264 else if (fmt[i] == 'E')
1265 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1266 if (modified_between_p (XVECEXP (x, i, j), start, end))
1267 return 1;
1268 }
1269
1270 return 0;
1271 }
1272
1273 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1274 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1275 does use memory aliasing. */
1276
1277 int
1278 modified_in_p (const_rtx x, const_rtx insn)
1279 {
1280 const enum rtx_code code = GET_CODE (x);
1281 const char *fmt;
1282 int i, j;
1283
1284 switch (code)
1285 {
1286 CASE_CONST_ANY:
1287 case CONST:
1288 case SYMBOL_REF:
1289 case LABEL_REF:
1290 return 0;
1291
1292 case PC:
1293 case CC0:
1294 return 1;
1295
1296 case MEM:
1297 if (modified_in_p (XEXP (x, 0), insn))
1298 return 1;
1299 if (MEM_READONLY_P (x))
1300 return 0;
1301 if (memory_modified_in_insn_p (x, insn))
1302 return 1;
1303 return 0;
1304
1305 case REG:
1306 return reg_set_p (x, insn);
1307
1308 default:
1309 break;
1310 }
1311
1312 fmt = GET_RTX_FORMAT (code);
1313 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1314 {
1315 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1316 return 1;
1317
1318 else if (fmt[i] == 'E')
1319 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1320 if (modified_in_p (XVECEXP (x, i, j), insn))
1321 return 1;
1322 }
1323
1324 return 0;
1325 }
1326 \f
1327 /* Helper function for set_of. */
1328 struct set_of_data
1329 {
1330 const_rtx found;
1331 const_rtx pat;
1332 };
1333
1334 static void
1335 set_of_1 (rtx x, const_rtx pat, void *data1)
1336 {
1337 struct set_of_data *const data = (struct set_of_data *) (data1);
1338 if (rtx_equal_p (x, data->pat)
1339 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1340 data->found = pat;
1341 }
1342
 1343 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1344 (either directly or via STRICT_LOW_PART and similar modifiers). */
1345 const_rtx
1346 set_of (const_rtx pat, const_rtx insn)
1347 {
1348 struct set_of_data data;
1349 data.found = NULL_RTX;
1350 data.pat = pat;
1351 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1352 return data.found;
1353 }
1354
 1355 /* Add all hard registers in X to *PSET. */
1356 void
1357 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1358 {
1359 subrtx_iterator::array_type array;
1360 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1361 {
1362 const_rtx x = *iter;
1363 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1364 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1365 }
1366 }
1367
1368 /* This function, called through note_stores, collects sets and
1369 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1370 by DATA. */
1371 void
1372 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1373 {
1374 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1375 if (REG_P (x) && HARD_REGISTER_P (x))
1376 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1377 }
1378
1379 /* Examine INSN, and compute the set of hard registers written by it.
1380 Store it in *PSET. Should only be called after reload. */
1381 void
1382 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1383 {
1384 rtx link;
1385
1386 CLEAR_HARD_REG_SET (*pset);
1387 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1388 if (CALL_P (insn))
1389 {
1390 if (implicit)
1391 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1392
1393 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1394 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1395 }
1396 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1397 if (REG_NOTE_KIND (link) == REG_INC)
1398 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1399 }
1400
1401 /* Like record_hard_reg_sets, but called through note_uses. */
1402 void
1403 record_hard_reg_uses (rtx *px, void *data)
1404 {
1405 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1406 }
1407 \f
1408 /* Given an INSN, return a SET expression if this insn has only a single SET.
 1409 It may also have CLOBBERs, USEs, or SETs whose output
1410 will not be used, which we ignore. */
1411
1412 rtx
1413 single_set_2 (const rtx_insn *insn, const_rtx pat)
1414 {
1415 rtx set = NULL;
1416 int set_verified = 1;
1417 int i;
1418
1419 if (GET_CODE (pat) == PARALLEL)
1420 {
1421 for (i = 0; i < XVECLEN (pat, 0); i++)
1422 {
1423 rtx sub = XVECEXP (pat, 0, i);
1424 switch (GET_CODE (sub))
1425 {
1426 case USE:
1427 case CLOBBER:
1428 break;
1429
1430 case SET:
 1431 /* We can consider insns having multiple sets, where all
 1432 but one are dead, as single set insns.  In the common case
 1433 only a single set is present in the pattern, so we want
 1434 to avoid checking for REG_UNUSED notes unless necessary.
 1435
 1436 When we reach a set the first time, we just expect it to be
 1437 the single set we are looking for; only when more sets are
 1438 found in the insn do we check them. */
1439 if (!set_verified)
1440 {
1441 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1442 && !side_effects_p (set))
1443 set = NULL;
1444 else
1445 set_verified = 1;
1446 }
1447 if (!set)
1448 set = sub, set_verified = 0;
1449 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1450 || side_effects_p (sub))
1451 return NULL_RTX;
1452 break;
1453
1454 default:
1455 return NULL_RTX;
1456 }
1457 }
1458 }
1459 return set;
1460 }
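/* Callers normally go through the single_set wrapper in rtl.h, which
   handles the common non-PARALLEL patterns inline and falls back to
   single_set_2 for PARALLELs.  A typical (hypothetical) use:

     rtx set = single_set (insn);
     if (set)
       {
         rtx dest = SET_DEST (set);
         rtx src = SET_SRC (set);
         ...
       }

   A NULL result means the insn cannot be treated as a single SET.  */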
1461
1462 /* Given an INSN, return nonzero if it has more than one SET, else return
1463 zero. */
1464
1465 int
1466 multiple_sets (const_rtx insn)
1467 {
1468 int found;
1469 int i;
1470
1471 /* INSN must be an insn. */
1472 if (! INSN_P (insn))
1473 return 0;
1474
1475 /* Only a PARALLEL can have multiple SETs. */
1476 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1477 {
1478 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1479 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1480 {
1481 /* If we have already found a SET, then return now. */
1482 if (found)
1483 return 1;
1484 else
1485 found = 1;
1486 }
1487 }
1488
1489 /* Either zero or one SET. */
1490 return 0;
1491 }
1492 \f
1493 /* Return nonzero if the destination of SET equals the source
1494 and there are no side effects. */
1495
1496 int
1497 set_noop_p (const_rtx set)
1498 {
1499 rtx src = SET_SRC (set);
1500 rtx dst = SET_DEST (set);
1501
1502 if (dst == pc_rtx && src == pc_rtx)
1503 return 1;
1504
1505 if (MEM_P (dst) && MEM_P (src))
1506 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1507
1508 if (GET_CODE (dst) == ZERO_EXTRACT)
1509 return rtx_equal_p (XEXP (dst, 0), src)
1510 && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1511 && !side_effects_p (src);
1512
1513 if (GET_CODE (dst) == STRICT_LOW_PART)
1514 dst = XEXP (dst, 0);
1515
1516 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1517 {
1518 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1519 return 0;
1520 src = SUBREG_REG (src);
1521 dst = SUBREG_REG (dst);
1522 }
1523
1524 /* It is a NOOP if destination overlaps with selected src vector
1525 elements. */
1526 if (GET_CODE (src) == VEC_SELECT
1527 && REG_P (XEXP (src, 0)) && REG_P (dst)
1528 && HARD_REGISTER_P (XEXP (src, 0))
1529 && HARD_REGISTER_P (dst))
1530 {
1531 int i;
1532 rtx par = XEXP (src, 1);
1533 rtx src0 = XEXP (src, 0);
1534 int c0 = INTVAL (XVECEXP (par, 0, 0));
1535 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1536
1537 for (i = 1; i < XVECLEN (par, 0); i++)
1538 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1539 return 0;
1540 return
1541 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1542 offset, GET_MODE (dst)) == (int) REGNO (dst);
1543 }
1544
1545 return (REG_P (src) && REG_P (dst)
1546 && REGNO (src) == REGNO (dst));
1547 }
1548 \f
1549 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1550 value to itself. */
1551
1552 int
1553 noop_move_p (const rtx_insn *insn)
1554 {
1555 rtx pat = PATTERN (insn);
1556
1557 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1558 return 1;
1559
1560 /* Insns carrying these notes are useful later on. */
1561 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1562 return 0;
1563
1564 /* Check the code to be executed for COND_EXEC. */
1565 if (GET_CODE (pat) == COND_EXEC)
1566 pat = COND_EXEC_CODE (pat);
1567
1568 if (GET_CODE (pat) == SET && set_noop_p (pat))
1569 return 1;
1570
1571 if (GET_CODE (pat) == PARALLEL)
1572 {
1573 int i;
1574 /* If nothing but SETs of registers to themselves,
1575 this insn can also be deleted. */
1576 for (i = 0; i < XVECLEN (pat, 0); i++)
1577 {
1578 rtx tem = XVECEXP (pat, 0, i);
1579
1580 if (GET_CODE (tem) == USE
1581 || GET_CODE (tem) == CLOBBER)
1582 continue;
1583
1584 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1585 return 0;
1586 }
1587
1588 return 1;
1589 }
1590 return 0;
1591 }
1592 \f
1593
 1594 /* Return nonzero if a register in the range [REGNO, ENDREGNO)
1595 appears either explicitly or implicitly in X
1596 other than being stored into.
1597
1598 References contained within the substructure at LOC do not count.
1599 LOC may be zero, meaning don't ignore anything. */
1600
1601 bool
1602 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1603 rtx *loc)
1604 {
1605 int i;
1606 unsigned int x_regno;
1607 RTX_CODE code;
1608 const char *fmt;
1609
1610 repeat:
 1611 /* The contents of a REG_NONNEG note are always zero, so we must come here
1612 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1613 if (x == 0)
1614 return false;
1615
1616 code = GET_CODE (x);
1617
1618 switch (code)
1619 {
1620 case REG:
1621 x_regno = REGNO (x);
1622
 1623 /* If we are modifying the stack, frame, or argument pointer, it will
1624 clobber a virtual register. In fact, we could be more precise,
1625 but it isn't worth it. */
1626 if ((x_regno == STACK_POINTER_REGNUM
1627 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1628 && x_regno == ARG_POINTER_REGNUM)
1629 || x_regno == FRAME_POINTER_REGNUM)
1630 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1631 return true;
1632
1633 return endregno > x_regno && regno < END_REGNO (x);
1634
1635 case SUBREG:
1636 /* If this is a SUBREG of a hard reg, we can see exactly which
1637 registers are being modified. Otherwise, handle normally. */
1638 if (REG_P (SUBREG_REG (x))
1639 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1640 {
1641 unsigned int inner_regno = subreg_regno (x);
1642 unsigned int inner_endregno
1643 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1644 ? subreg_nregs (x) : 1);
1645
1646 return endregno > inner_regno && regno < inner_endregno;
1647 }
1648 break;
1649
1650 case CLOBBER:
1651 case SET:
1652 if (&SET_DEST (x) != loc
1653 /* Note setting a SUBREG counts as referring to the REG it is in for
1654 a pseudo but not for hard registers since we can
1655 treat each word individually. */
1656 && ((GET_CODE (SET_DEST (x)) == SUBREG
1657 && loc != &SUBREG_REG (SET_DEST (x))
1658 && REG_P (SUBREG_REG (SET_DEST (x)))
1659 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1660 && refers_to_regno_p (regno, endregno,
1661 SUBREG_REG (SET_DEST (x)), loc))
1662 || (!REG_P (SET_DEST (x))
1663 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1664 return true;
1665
1666 if (code == CLOBBER || loc == &SET_SRC (x))
1667 return false;
1668 x = SET_SRC (x);
1669 goto repeat;
1670
1671 default:
1672 break;
1673 }
1674
1675 /* X does not match, so try its subexpressions. */
1676
1677 fmt = GET_RTX_FORMAT (code);
1678 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1679 {
1680 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1681 {
1682 if (i == 0)
1683 {
1684 x = XEXP (x, 0);
1685 goto repeat;
1686 }
1687 else
1688 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1689 return true;
1690 }
1691 else if (fmt[i] == 'E')
1692 {
1693 int j;
1694 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1695 if (loc != &XVECEXP (x, i, j)
1696 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1697 return true;
1698 }
1699 }
1700 return false;
1701 }
1702
1703 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1704 we check if any register number in X conflicts with the relevant register
1705 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1706 contains a MEM (we don't bother checking for memory addresses that can't
 1707 conflict because we expect this to be a rare case). */
1708
1709 int
1710 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1711 {
1712 unsigned int regno, endregno;
1713
 1714 /* If either argument is a constant, then modifying X cannot
 1715 affect IN.  We only test IN here; the CONSTANT_P (x) case is
 1716 profitably combined with the switch statement below. */
1717 if (CONSTANT_P (in))
1718 return 0;
1719
1720 recurse:
1721 switch (GET_CODE (x))
1722 {
1723 case STRICT_LOW_PART:
1724 case ZERO_EXTRACT:
1725 case SIGN_EXTRACT:
1726 /* Overly conservative. */
1727 x = XEXP (x, 0);
1728 goto recurse;
1729
1730 case SUBREG:
1731 regno = REGNO (SUBREG_REG (x));
1732 if (regno < FIRST_PSEUDO_REGISTER)
1733 regno = subreg_regno (x);
1734 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1735 ? subreg_nregs (x) : 1);
1736 goto do_reg;
1737
1738 case REG:
1739 regno = REGNO (x);
1740 endregno = END_REGNO (x);
1741 do_reg:
1742 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1743
1744 case MEM:
1745 {
1746 const char *fmt;
1747 int i;
1748
1749 if (MEM_P (in))
1750 return 1;
1751
1752 fmt = GET_RTX_FORMAT (GET_CODE (in));
1753 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1754 if (fmt[i] == 'e')
1755 {
1756 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1757 return 1;
1758 }
1759 else if (fmt[i] == 'E')
1760 {
1761 int j;
1762 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1763 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1764 return 1;
1765 }
1766
1767 return 0;
1768 }
1769
1770 case SCRATCH:
1771 case PC:
1772 case CC0:
1773 return reg_mentioned_p (x, in);
1774
1775 case PARALLEL:
1776 {
1777 int i;
1778
1779 /* If any register in here refers to it we return true. */
1780 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1781 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1782 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1783 return 1;
1784 return 0;
1785 }
1786
1787 default:
1788 gcc_assert (CONSTANT_P (x));
1789 return 0;
1790 }
1791 }
1792 \f
1793 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1794 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1795 ignored by note_stores, but passed to FUN.
1796
1797 FUN receives three arguments:
1798 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1799 2. the SET or CLOBBER rtx that does the store,
1800 3. the pointer DATA provided to note_stores.
1801
1802 If the item being stored in or clobbered is a SUBREG of a hard register,
1803 the SUBREG will be passed. */
1804
1805 void
1806 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1807 {
1808 int i;
1809
1810 if (GET_CODE (x) == COND_EXEC)
1811 x = COND_EXEC_CODE (x);
1812
1813 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1814 {
1815 rtx dest = SET_DEST (x);
1816
1817 while ((GET_CODE (dest) == SUBREG
1818 && (!REG_P (SUBREG_REG (dest))
1819 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1820 || GET_CODE (dest) == ZERO_EXTRACT
1821 || GET_CODE (dest) == STRICT_LOW_PART)
1822 dest = XEXP (dest, 0);
1823
1824 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1825 each of whose first operand is a register. */
1826 if (GET_CODE (dest) == PARALLEL)
1827 {
1828 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1829 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1830 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1831 }
1832 else
1833 (*fun) (dest, x, data);
1834 }
1835
1836 else if (GET_CODE (x) == PARALLEL)
1837 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1838 note_stores (XVECEXP (x, 0, i), fun, data);
1839 }
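/* A sketch of a note_stores callback (the callback and the counter are
   hypothetical, shown only to illustrate the calling convention described
   above):

     static void
     count_reg_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
     {
       int *count = (int *) data;
       if (REG_P (dest))
         (*count)++;
     }

     ...
     int n = 0;
     note_stores (PATTERN (insn), count_reg_stores, &n);

   record_hard_reg_sets above is an in-tree callback written this way.  */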
1840 \f
 1841 /* Like note_stores, but call FUN for each expression that is being
1842 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1843 FUN for each expression, not any interior subexpressions. FUN receives a
1844 pointer to the expression and the DATA passed to this function.
1845
1846 Note that this is not quite the same test as that done in reg_referenced_p
1847 since that considers something as being referenced if it is being
1848 partially set, while we do not. */
1849
1850 void
1851 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1852 {
1853 rtx body = *pbody;
1854 int i;
1855
1856 switch (GET_CODE (body))
1857 {
1858 case COND_EXEC:
1859 (*fun) (&COND_EXEC_TEST (body), data);
1860 note_uses (&COND_EXEC_CODE (body), fun, data);
1861 return;
1862
1863 case PARALLEL:
1864 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1865 note_uses (&XVECEXP (body, 0, i), fun, data);
1866 return;
1867
1868 case SEQUENCE:
1869 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1870 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1871 return;
1872
1873 case USE:
1874 (*fun) (&XEXP (body, 0), data);
1875 return;
1876
1877 case ASM_OPERANDS:
1878 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1879 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1880 return;
1881
1882 case TRAP_IF:
1883 (*fun) (&TRAP_CONDITION (body), data);
1884 return;
1885
1886 case PREFETCH:
1887 (*fun) (&XEXP (body, 0), data);
1888 return;
1889
1890 case UNSPEC:
1891 case UNSPEC_VOLATILE:
1892 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1893 (*fun) (&XVECEXP (body, 0, i), data);
1894 return;
1895
1896 case CLOBBER:
1897 if (MEM_P (XEXP (body, 0)))
1898 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1899 return;
1900
1901 case SET:
1902 {
1903 rtx dest = SET_DEST (body);
1904
 1905 /* For a SET we process the whole source, plus the address of a MEM
 1906 destination and the position/length operands of a ZERO_EXTRACT destination. */
1907 (*fun) (&SET_SRC (body), data);
1908
1909 if (GET_CODE (dest) == ZERO_EXTRACT)
1910 {
1911 (*fun) (&XEXP (dest, 1), data);
1912 (*fun) (&XEXP (dest, 2), data);
1913 }
1914
1915 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1916 dest = XEXP (dest, 0);
1917
1918 if (MEM_P (dest))
1919 (*fun) (&XEXP (dest, 0), data);
1920 }
1921 return;
1922
1923 default:
1924 /* All the other possibilities never store. */
1925 (*fun) (pbody, data);
1926 return;
1927 }
1928 }
1929 \f
1930 /* Return nonzero if X's old contents don't survive after INSN.
1931 This will be true if X is (cc0) or if X is a register and
1932 X dies in INSN or because INSN entirely sets X.
1933
1934 "Entirely set" means set directly and not through a SUBREG, or
1935 ZERO_EXTRACT, so no trace of the old contents remains.
1936 Likewise, REG_INC does not count.
1937
1938 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1939 but for this use that makes no difference, since regs don't overlap
1940 during their lifetimes. Therefore, this function may be used
1941 at any time after deaths have been computed.
1942
1943 If REG is a hard reg that occupies multiple machine registers, this
1944 function will only return 1 if each of those registers will be replaced
1945 by INSN. */
1946
1947 int
1948 dead_or_set_p (const rtx_insn *insn, const_rtx x)
1949 {
1950 unsigned int regno, end_regno;
1951 unsigned int i;
1952
1953 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1954 if (GET_CODE (x) == CC0)
1955 return 1;
1956
1957 gcc_assert (REG_P (x));
1958
1959 regno = REGNO (x);
1960 end_regno = END_REGNO (x);
1961 for (i = regno; i < end_regno; i++)
1962 if (! dead_or_set_regno_p (insn, i))
1963 return 0;
1964
1965 return 1;
1966 }
1967
 1968 /* Return TRUE iff DEST is a register, or a subreg of a register that
 1969 doesn't change the number of words of the inner register, and
 1970 TEST_REGNO is one of the registers it covers. */
1971
1972 static bool
1973 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1974 {
1975 unsigned int regno, endregno;
1976
1977 if (GET_CODE (dest) == SUBREG
1978 && (((GET_MODE_SIZE (GET_MODE (dest))
1979 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1980 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1981 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1982 dest = SUBREG_REG (dest);
1983
1984 if (!REG_P (dest))
1985 return false;
1986
1987 regno = REGNO (dest);
1988 endregno = END_REGNO (dest);
1989 return (test_regno >= regno && test_regno < endregno);
1990 }
1991
1992 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1993 any member matches the covers_regno_no_parallel_p criteria. */
1994
1995 static bool
1996 covers_regno_p (const_rtx dest, unsigned int test_regno)
1997 {
1998 if (GET_CODE (dest) == PARALLEL)
1999 {
2000 /* Some targets place small structures in registers for return
2001 values of functions, and those registers are wrapped in
2002 PARALLELs that we may see as the destination of a SET. */
2003 int i;
2004
2005 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2006 {
2007 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2008 if (inner != NULL_RTX
2009 && covers_regno_no_parallel_p (inner, test_regno))
2010 return true;
2011 }
2012
2013 return false;
2014 }
2015 else
2016 return covers_regno_no_parallel_p (dest, test_regno);
2017 }
2018
2019 /* Utility function for dead_or_set_p to check an individual register. */
2020
2021 int
2022 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2023 {
2024 const_rtx pattern;
2025
2026 /* See if there is a death note for something that includes TEST_REGNO. */
2027 if (find_regno_note (insn, REG_DEAD, test_regno))
2028 return 1;
2029
2030 if (CALL_P (insn)
2031 && find_regno_fusage (insn, CLOBBER, test_regno))
2032 return 1;
2033
2034 pattern = PATTERN (insn);
2035
2036 /* If a COND_EXEC is not executed, the value survives. */
2037 if (GET_CODE (pattern) == COND_EXEC)
2038 return 0;
2039
2040 if (GET_CODE (pattern) == SET)
2041 return covers_regno_p (SET_DEST (pattern), test_regno);
2042 else if (GET_CODE (pattern) == PARALLEL)
2043 {
2044 int i;
2045
2046 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2047 {
2048 rtx body = XVECEXP (pattern, 0, i);
2049
2050 if (GET_CODE (body) == COND_EXEC)
2051 body = COND_EXEC_CODE (body);
2052
2053 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2054 && covers_regno_p (SET_DEST (body), test_regno))
2055 return 1;
2056 }
2057 }
2058
2059 return 0;
2060 }
2061
2062 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2063 If DATUM is nonzero, look for one whose datum is DATUM. */
2064
2065 rtx
2066 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2067 {
2068 rtx link;
2069
2070 gcc_checking_assert (insn);
2071
2072 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2073 if (! INSN_P (insn))
2074 return 0;
2075 if (datum == 0)
2076 {
2077 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2078 if (REG_NOTE_KIND (link) == kind)
2079 return link;
2080 return 0;
2081 }
2082
2083 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2084 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2085 return link;
2086 return 0;
2087 }
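
/* Illustrative sketch (hypothetical helper, not in the original file):
   fetch the value recorded by a REG_EQUAL note on INSN, if there is one.  */

static rtx
example_reg_equal_value (const rtx_insn *insn)
{
  /* A null DATUM asks for any note of the given kind.  */
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  return note ? XEXP (note, 0) : NULL_RTX;
}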
2088
2089 /* Return the reg-note of kind KIND in insn INSN which applies to register
2090 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2091 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2092 it might be the case that the note overlaps REGNO. */
2093
2094 rtx
2095 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2096 {
2097 rtx link;
2098
2099 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2100 if (! INSN_P (insn))
2101 return 0;
2102
2103 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2104 if (REG_NOTE_KIND (link) == kind
2105 /* Verify that it is a register, so that scratch and MEM won't cause a
2106 problem here. */
2107 && REG_P (XEXP (link, 0))
2108 && REGNO (XEXP (link, 0)) <= regno
2109 && END_REGNO (XEXP (link, 0)) > regno)
2110 return link;
2111 return 0;
2112 }
2113
2114 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2115 has such a note. */
2116
2117 rtx
2118 find_reg_equal_equiv_note (const_rtx insn)
2119 {
2120 rtx link;
2121
2122 if (!INSN_P (insn))
2123 return 0;
2124
2125 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2126 if (REG_NOTE_KIND (link) == REG_EQUAL
2127 || REG_NOTE_KIND (link) == REG_EQUIV)
2128 {
2129 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2130 insns that have multiple sets. Checking single_set to
2131 make sure of this is not the proper check, as explained
2132 in the comment in set_unique_reg_note.
2133
2134 This should be changed into an assert. */
2135 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2136 return 0;
2137 return link;
2138 }
2139 return NULL;
2140 }
2141
2142 /* Check whether INSN is a single_set whose source is known to be
2143 equivalent to a constant. Return that constant if so, otherwise
2144 return null. */
2145
2146 rtx
2147 find_constant_src (const rtx_insn *insn)
2148 {
2149 rtx note, set, x;
2150
2151 set = single_set (insn);
2152 if (set)
2153 {
2154 x = avoid_constant_pool_reference (SET_SRC (set));
2155 if (CONSTANT_P (x))
2156 return x;
2157 }
2158
2159 note = find_reg_equal_equiv_note (insn);
2160 if (note && CONSTANT_P (XEXP (note, 0)))
2161 return XEXP (note, 0);
2162
2163 return NULL_RTX;
2164 }
2165
2166 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2167 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2168
2169 int
2170 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2171 {
2172 /* If it's not a CALL_INSN, it can't possibly have a
2173 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2174 if (!CALL_P (insn))
2175 return 0;
2176
2177 gcc_assert (datum);
2178
2179 if (!REG_P (datum))
2180 {
2181 rtx link;
2182
2183 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2184 link;
2185 link = XEXP (link, 1))
2186 if (GET_CODE (XEXP (link, 0)) == code
2187 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2188 return 1;
2189 }
2190 else
2191 {
2192 unsigned int regno = REGNO (datum);
2193
2194 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2195 to pseudo registers, so don't bother checking. */
2196
2197 if (regno < FIRST_PSEUDO_REGISTER)
2198 {
2199 unsigned int end_regno = END_REGNO (datum);
2200 unsigned int i;
2201
2202 for (i = regno; i < end_regno; i++)
2203 if (find_regno_fusage (insn, code, i))
2204 return 1;
2205 }
2206 }
2207
2208 return 0;
2209 }
2210
2211 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2212 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2213
2214 int
2215 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2216 {
2217 rtx link;
2218
2219 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2220 to pseudo registers, so don't bother checking. */
2221
2222 if (regno >= FIRST_PSEUDO_REGISTER
2223 || !CALL_P (insn) )
2224 return 0;
2225
2226 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2227 {
2228 rtx op, reg;
2229
2230 if (GET_CODE (op = XEXP (link, 0)) == code
2231 && REG_P (reg = XEXP (op, 0))
2232 && REGNO (reg) <= regno
2233 && END_REGNO (reg) > regno)
2234 return 1;
2235 }
2236
2237 return 0;
2238 }
2239
2240 \f
2241 /* Return true if KIND is an integer REG_NOTE. */
2242
2243 static bool
2244 int_reg_note_p (enum reg_note kind)
2245 {
2246 return kind == REG_BR_PROB;
2247 }
2248
2249 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2250 stored as the pointer to the next register note. */
2251
2252 rtx
2253 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2254 {
2255 rtx note;
2256
2257 gcc_checking_assert (!int_reg_note_p (kind));
2258 switch (kind)
2259 {
2260 case REG_CC_SETTER:
2261 case REG_CC_USER:
2262 case REG_LABEL_TARGET:
2263 case REG_LABEL_OPERAND:
2264 case REG_TM:
2265 /* These types of register notes use an INSN_LIST rather than an
2266 EXPR_LIST, so that copying is done right and dumps look
2267 better. */
2268 note = alloc_INSN_LIST (datum, list);
2269 PUT_REG_NOTE_KIND (note, kind);
2270 break;
2271
2272 default:
2273 note = alloc_EXPR_LIST (kind, datum, list);
2274 break;
2275 }
2276
2277 return note;
2278 }
2279
2280 /* Add register note with kind KIND and datum DATUM to INSN. */
2281
2282 void
2283 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2284 {
2285 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2286 }
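
/* Illustrative sketch (hypothetical, not in the original file): record that
   the value stored into REG by INSN is never used afterwards.  Real passes
   normally leave such notes to the dataflow machinery; this only shows the
   calling convention of add_reg_note.  */

static void
example_mark_reg_unused (rtx_insn *insn, rtx reg)
{
  add_reg_note (insn, REG_UNUSED, reg);
}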
2287
2288 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2289
2290 void
2291 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2292 {
2293 gcc_checking_assert (int_reg_note_p (kind));
2294 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2295 datum, REG_NOTES (insn));
2296 }
2297
2298 /* Add a register note like NOTE to INSN. */
2299
2300 void
2301 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2302 {
2303 if (GET_CODE (note) == INT_LIST)
2304 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2305 else
2306 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2307 }
2308
2309 /* Duplicate NOTE and return the copy. */
2310 rtx
2311 duplicate_reg_note (rtx note)
2312 {
2313 reg_note kind = REG_NOTE_KIND (note);
2314
2315 if (GET_CODE (note) == INT_LIST)
2316 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2317 else if (GET_CODE (note) == EXPR_LIST)
2318 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2319 else
2320 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2321 }
2322
2323 /* Remove register note NOTE from the REG_NOTES of INSN. */
2324
2325 void
2326 remove_note (rtx_insn *insn, const_rtx note)
2327 {
2328 rtx link;
2329
2330 if (note == NULL_RTX)
2331 return;
2332
2333 if (REG_NOTES (insn) == note)
2334 REG_NOTES (insn) = XEXP (note, 1);
2335 else
2336 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2337 if (XEXP (link, 1) == note)
2338 {
2339 XEXP (link, 1) = XEXP (note, 1);
2340 break;
2341 }
2342
2343 switch (REG_NOTE_KIND (note))
2344 {
2345 case REG_EQUAL:
2346 case REG_EQUIV:
2347 df_notes_rescan (insn);
2348 break;
2349 default:
2350 break;
2351 }
2352 }
2353
2354 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2355
2356 void
2357 remove_reg_equal_equiv_notes (rtx_insn *insn)
2358 {
2359 rtx *loc;
2360
2361 loc = &REG_NOTES (insn);
2362 while (*loc)
2363 {
2364 enum reg_note kind = REG_NOTE_KIND (*loc);
2365 if (kind == REG_EQUAL || kind == REG_EQUIV)
2366 *loc = XEXP (*loc, 1);
2367 else
2368 loc = &XEXP (*loc, 1);
2369 }
2370 }
2371
2372 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2373
2374 void
2375 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2376 {
2377 df_ref eq_use;
2378
2379 if (!df)
2380 return;
2381
2382 /* This loop is a little tricky. We cannot just go down the chain because
2383 it is being modified by some actions in the loop. So we just iterate
2384 over the head. We plan to drain the list anyway. */
2385 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2386 {
2387 rtx_insn *insn = DF_REF_INSN (eq_use);
2388 rtx note = find_reg_equal_equiv_note (insn);
2389
2390 /* This assert is generally triggered when someone deletes a REG_EQUAL
2391 or REG_EQUIV note by hacking the list manually rather than calling
2392 remove_note. */
2393 gcc_assert (note);
2394
2395 remove_note (insn, note);
2396 }
2397 }
2398
2399 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2400 return true if it is found. A simple equality test is used to determine if
2401 NODE matches. */
2402
2403 bool
2404 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2405 {
2406 const_rtx x;
2407
2408 for (x = listp; x; x = XEXP (x, 1))
2409 if (node == XEXP (x, 0))
2410 return true;
2411
2412 return false;
2413 }
2414
2415 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2416 remove that entry from the list if it is found.
2417
2418 A simple equality test is used to determine if NODE matches. */
2419
2420 void
2421 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2422 {
2423 rtx_expr_list *temp = *listp;
2424 rtx_expr_list *prev = NULL;
2425
2426 while (temp)
2427 {
2428 if (node == temp->element ())
2429 {
2430 /* Splice the node out of the list. */
2431 if (prev)
2432 XEXP (prev, 1) = temp->next ();
2433 else
2434 *listp = temp->next ();
2435
2436 return;
2437 }
2438
2439 prev = temp;
2440 temp = temp->next ();
2441 }
2442 }
2443
2444 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2445 remove that entry from the list if it is found.
2446
2447 A simple equality test is used to determine if NODE matches. */
2448
2449 void
2450 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2451 {
2452 rtx_insn_list *temp = *listp;
2453 rtx_insn_list *prev = NULL;
2454
2455 while (temp)
2456 {
2457 if (node == temp->insn ())
2458 {
2459 /* Splice the node out of the list. */
2460 if (prev)
2461 XEXP (prev, 1) = temp->next ();
2462 else
2463 *listp = temp->next ();
2464
2465 return;
2466 }
2467
2468 prev = temp;
2469 temp = temp->next ();
2470 }
2471 }
2472 \f
2473 /* Nonzero if X contains any volatile instructions. These are instructions
2474 which may cause unpredictable machine state, and thus no instructions
2475 or register uses should be moved or combined across them.
2476 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2477
2478 int
2479 volatile_insn_p (const_rtx x)
2480 {
2481 const RTX_CODE code = GET_CODE (x);
2482 switch (code)
2483 {
2484 case LABEL_REF:
2485 case SYMBOL_REF:
2486 case CONST:
2487 CASE_CONST_ANY:
2488 case CC0:
2489 case PC:
2490 case REG:
2491 case SCRATCH:
2492 case CLOBBER:
2493 case ADDR_VEC:
2494 case ADDR_DIFF_VEC:
2495 case CALL:
2496 case MEM:
2497 return 0;
2498
2499 case UNSPEC_VOLATILE:
2500 return 1;
2501
2502 case ASM_INPUT:
2503 case ASM_OPERANDS:
2504 if (MEM_VOLATILE_P (x))
2505 return 1;
2506
2507 default:
2508 break;
2509 }
2510
2511 /* Recursively scan the operands of this expression. */
2512
2513 {
2514 const char *const fmt = GET_RTX_FORMAT (code);
2515 int i;
2516
2517 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2518 {
2519 if (fmt[i] == 'e')
2520 {
2521 if (volatile_insn_p (XEXP (x, i)))
2522 return 1;
2523 }
2524 else if (fmt[i] == 'E')
2525 {
2526 int j;
2527 for (j = 0; j < XVECLEN (x, i); j++)
2528 if (volatile_insn_p (XVECEXP (x, i, j)))
2529 return 1;
2530 }
2531 }
2532 }
2533 return 0;
2534 }
2535
2536 /* Nonzero if X contains any volatile memory references,
2537 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2538
2539 int
2540 volatile_refs_p (const_rtx x)
2541 {
2542 const RTX_CODE code = GET_CODE (x);
2543 switch (code)
2544 {
2545 case LABEL_REF:
2546 case SYMBOL_REF:
2547 case CONST:
2548 CASE_CONST_ANY:
2549 case CC0:
2550 case PC:
2551 case REG:
2552 case SCRATCH:
2553 case CLOBBER:
2554 case ADDR_VEC:
2555 case ADDR_DIFF_VEC:
2556 return 0;
2557
2558 case UNSPEC_VOLATILE:
2559 return 1;
2560
2561 case MEM:
2562 case ASM_INPUT:
2563 case ASM_OPERANDS:
2564 if (MEM_VOLATILE_P (x))
2565 return 1;
2566
2567 default:
2568 break;
2569 }
2570
2571 /* Recursively scan the operands of this expression. */
2572
2573 {
2574 const char *const fmt = GET_RTX_FORMAT (code);
2575 int i;
2576
2577 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2578 {
2579 if (fmt[i] == 'e')
2580 {
2581 if (volatile_refs_p (XEXP (x, i)))
2582 return 1;
2583 }
2584 else if (fmt[i] == 'E')
2585 {
2586 int j;
2587 for (j = 0; j < XVECLEN (x, i); j++)
2588 if (volatile_refs_p (XVECEXP (x, i, j)))
2589 return 1;
2590 }
2591 }
2592 }
2593 return 0;
2594 }
2595
2596 /* Similar to above, except that it also rejects register pre- and post-
2597 incrementing. */
2598
2599 int
2600 side_effects_p (const_rtx x)
2601 {
2602 const RTX_CODE code = GET_CODE (x);
2603 switch (code)
2604 {
2605 case LABEL_REF:
2606 case SYMBOL_REF:
2607 case CONST:
2608 CASE_CONST_ANY:
2609 case CC0:
2610 case PC:
2611 case REG:
2612 case SCRATCH:
2613 case ADDR_VEC:
2614 case ADDR_DIFF_VEC:
2615 case VAR_LOCATION:
2616 return 0;
2617
2618 case CLOBBER:
2619 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2620 when some combination can't be done. If we see one, don't think
2621 that we can simplify the expression. */
2622 return (GET_MODE (x) != VOIDmode);
2623
2624 case PRE_INC:
2625 case PRE_DEC:
2626 case POST_INC:
2627 case POST_DEC:
2628 case PRE_MODIFY:
2629 case POST_MODIFY:
2630 case CALL:
2631 case UNSPEC_VOLATILE:
2632 return 1;
2633
2634 case MEM:
2635 case ASM_INPUT:
2636 case ASM_OPERANDS:
2637 if (MEM_VOLATILE_P (x))
2638 return 1;
2639
2640 default:
2641 break;
2642 }
2643
2644 /* Recursively scan the operands of this expression. */
2645
2646 {
2647 const char *fmt = GET_RTX_FORMAT (code);
2648 int i;
2649
2650 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2651 {
2652 if (fmt[i] == 'e')
2653 {
2654 if (side_effects_p (XEXP (x, i)))
2655 return 1;
2656 }
2657 else if (fmt[i] == 'E')
2658 {
2659 int j;
2660 for (j = 0; j < XVECLEN (x, i); j++)
2661 if (side_effects_p (XVECEXP (x, i, j)))
2662 return 1;
2663 }
2664 }
2665 }
2666 return 0;
2667 }
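
/* Illustrative sketch (hypothetical): a pass deciding whether the source of
   a single-set insn could safely be recomputed elsewhere might combine the
   predicates above with may_trap_p (defined further below).  */

static bool
example_src_safe_to_recompute_p (const rtx_insn *insn)
{
  rtx set = single_set (insn);
  return (set != NULL_RTX
          && ! side_effects_p (SET_SRC (set))
          && ! may_trap_p (SET_SRC (set)));
}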
2668 \f
2669 /* Return nonzero if evaluating rtx X might cause a trap.
2670 FLAGS controls how to consider MEMs. A nonzero value means the context
2671 of the access may have changed from the original, such that the
2672 address may have become invalid. */
2673
2674 int
2675 may_trap_p_1 (const_rtx x, unsigned flags)
2676 {
2677 int i;
2678 enum rtx_code code;
2679 const char *fmt;
2680
2681 /* We make no distinction currently, but this function is part of
2682 the internal target-hooks ABI so we keep the parameter as
2683 "unsigned flags". */
2684 bool code_changed = flags != 0;
2685
2686 if (x == 0)
2687 return 0;
2688 code = GET_CODE (x);
2689 switch (code)
2690 {
2691 /* Handle these cases quickly. */
2692 CASE_CONST_ANY:
2693 case SYMBOL_REF:
2694 case LABEL_REF:
2695 case CONST:
2696 case PC:
2697 case CC0:
2698 case REG:
2699 case SCRATCH:
2700 return 0;
2701
2702 case UNSPEC:
2703 return targetm.unspec_may_trap_p (x, flags);
2704
2705 case UNSPEC_VOLATILE:
2706 case ASM_INPUT:
2707 case TRAP_IF:
2708 return 1;
2709
2710 case ASM_OPERANDS:
2711 return MEM_VOLATILE_P (x);
2712
2713 /* Memory ref can trap unless it's a static var or a stack slot. */
2714 case MEM:
2715 /* Recognize the specific pattern of stack-checking probes. */
2716 if (flag_stack_check
2717 && MEM_VOLATILE_P (x)
2718 && XEXP (x, 0) == stack_pointer_rtx)
2719 return 1;
2720 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2721 reference; moving it out of context such as when moving code
2722 when optimizing, might cause its address to become invalid. */
2723 code_changed
2724 || !MEM_NOTRAP_P (x))
2725 {
2726 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2727 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2728 GET_MODE (x), code_changed);
2729 }
2730
2731 return 0;
2732
2733 /* Division by a non-constant might trap. */
2734 case DIV:
2735 case MOD:
2736 case UDIV:
2737 case UMOD:
2738 if (HONOR_SNANS (x))
2739 return 1;
2740 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2741 return flag_trapping_math;
2742 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2743 return 1;
2744 break;
2745
2746 case EXPR_LIST:
2747 /* An EXPR_LIST is used to represent a function call. This
2748 certainly may trap. */
2749 return 1;
2750
2751 case GE:
2752 case GT:
2753 case LE:
2754 case LT:
2755 case LTGT:
2756 case COMPARE:
2757 /* Some floating point comparisons may trap. */
2758 if (!flag_trapping_math)
2759 break;
2760 /* ??? There is no machine independent way to check for tests that trap
2761 when COMPARE is used, though many targets do make this distinction.
2762 For instance, sparc uses CCFPE for compares which generate exceptions
2763 and CCFP for compares which do not generate exceptions. */
2764 if (HONOR_NANS (x))
2765 return 1;
2766 /* But often the compare has some CC mode, so check operand
2767 modes as well. */
2768 if (HONOR_NANS (XEXP (x, 0))
2769 || HONOR_NANS (XEXP (x, 1)))
2770 return 1;
2771 break;
2772
2773 case EQ:
2774 case NE:
2775 if (HONOR_SNANS (x))
2776 return 1;
2777 /* Often comparison is CC mode, so check operand modes. */
2778 if (HONOR_SNANS (XEXP (x, 0))
2779 || HONOR_SNANS (XEXP (x, 1)))
2780 return 1;
2781 break;
2782
2783 case FIX:
2784 /* Conversion of floating point might trap. */
2785 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2786 return 1;
2787 break;
2788
2789 case NEG:
2790 case ABS:
2791 case SUBREG:
2792 /* These operations don't trap even with floating point. */
2793 break;
2794
2795 default:
2796 /* Any floating arithmetic may trap. */
2797 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2798 return 1;
2799 }
2800
2801 fmt = GET_RTX_FORMAT (code);
2802 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2803 {
2804 if (fmt[i] == 'e')
2805 {
2806 if (may_trap_p_1 (XEXP (x, i), flags))
2807 return 1;
2808 }
2809 else if (fmt[i] == 'E')
2810 {
2811 int j;
2812 for (j = 0; j < XVECLEN (x, i); j++)
2813 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2814 return 1;
2815 }
2816 }
2817 return 0;
2818 }
2819
2820 /* Return nonzero if evaluating rtx X might cause a trap. */
2821
2822 int
2823 may_trap_p (const_rtx x)
2824 {
2825 return may_trap_p_1 (x, 0);
2826 }
2827
2828 /* Same as above, but additionally return nonzero if evaluating rtx X might
2829 cause a fault. We define a fault for the purpose of this function as an
2830 erroneous execution condition that cannot be encountered during the normal
2831 execution of a valid program; the typical example is an unaligned memory
2832 access on a strict alignment machine. The compiler guarantees that it
2833 doesn't generate code that will fault from a valid program, but this
2834 guarantee doesn't mean anything for individual instructions. Consider
2835 the following example:
2836
2837 struct S { int d; union { char *cp; int *ip; }; };
2838
2839 int foo(struct S *s)
2840 {
2841 if (s->d == 1)
2842 return *s->ip;
2843 else
2844 return *s->cp;
2845 }
2846
2847 on a strict alignment machine. In a valid program, foo will never be
2848 invoked on a structure for which d is equal to 1 and the underlying
2849 unique field of the union not aligned on a 4-byte boundary, but the
2850 expression *s->ip might cause a fault if considered individually.
2851
2852 At the RTL level, potentially problematic expressions will almost always
2853 verify may_trap_p; for example, the above dereference can be emitted as
2854 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2855 However, suppose that foo is inlined in a caller that causes s->cp to
2856 point to a local character variable and guarantees that s->d is not set
2857 to 1; foo may have been effectively translated into pseudo-RTL as:
2858
2859 if ((reg:SI) == 1)
2860 (set (reg:SI) (mem:SI (%fp - 7)))
2861 else
2862 (set (reg:QI) (mem:QI (%fp - 7)))
2863
2864 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2865 memory reference to a stack slot, but it will certainly cause a fault
2866 on a strict alignment machine. */
2867
2868 int
2869 may_trap_or_fault_p (const_rtx x)
2870 {
2871 return may_trap_p_1 (x, 1);
2872 }
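
/* Illustrative sketch (hypothetical): speculative code motion, which may
   execute an expression in contexts the original program never created,
   typically needs the stronger of the two predicates above.  */

static bool
example_safe_to_speculate_p (const_rtx x)
{
  return ! may_trap_or_fault_p (x) && ! side_effects_p (x);
}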
2873 \f
2874 /* Return nonzero if X contains a comparison that is neither EQ nor NE,
2875 i.e., an inequality. */
2876
2877 int
2878 inequality_comparisons_p (const_rtx x)
2879 {
2880 const char *fmt;
2881 int len, i;
2882 const enum rtx_code code = GET_CODE (x);
2883
2884 switch (code)
2885 {
2886 case REG:
2887 case SCRATCH:
2888 case PC:
2889 case CC0:
2890 CASE_CONST_ANY:
2891 case CONST:
2892 case LABEL_REF:
2893 case SYMBOL_REF:
2894 return 0;
2895
2896 case LT:
2897 case LTU:
2898 case GT:
2899 case GTU:
2900 case LE:
2901 case LEU:
2902 case GE:
2903 case GEU:
2904 return 1;
2905
2906 default:
2907 break;
2908 }
2909
2910 len = GET_RTX_LENGTH (code);
2911 fmt = GET_RTX_FORMAT (code);
2912
2913 for (i = 0; i < len; i++)
2914 {
2915 if (fmt[i] == 'e')
2916 {
2917 if (inequality_comparisons_p (XEXP (x, i)))
2918 return 1;
2919 }
2920 else if (fmt[i] == 'E')
2921 {
2922 int j;
2923 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2924 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2925 return 1;
2926 }
2927 }
2928
2929 return 0;
2930 }
2931 \f
2932 /* Replace any occurrence of FROM in X with TO. The function does
2933 not descend into CONST_DOUBLE when replacing.
2934
2935 Note that copying is not done so X must not be shared unless all copies
2936 are to be modified.
2937
2938 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
2939 those pointer-equal ones. */
2940
2941 rtx
2942 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
2943 {
2944 int i, j;
2945 const char *fmt;
2946
2947 if (x == from)
2948 return to;
2949
2950 /* Allow this function to make replacements in EXPR_LISTs. */
2951 if (x == 0)
2952 return 0;
2953
2954 if (all_regs
2955 && REG_P (x)
2956 && REG_P (from)
2957 && REGNO (x) == REGNO (from))
2958 {
2959 gcc_assert (GET_MODE (x) == GET_MODE (from));
2960 return to;
2961 }
2962 else if (GET_CODE (x) == SUBREG)
2963 {
2964 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
2965
2966 if (CONST_INT_P (new_rtx))
2967 {
2968 x = simplify_subreg (GET_MODE (x), new_rtx,
2969 GET_MODE (SUBREG_REG (x)),
2970 SUBREG_BYTE (x));
2971 gcc_assert (x);
2972 }
2973 else
2974 SUBREG_REG (x) = new_rtx;
2975
2976 return x;
2977 }
2978 else if (GET_CODE (x) == ZERO_EXTEND)
2979 {
2980 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
2981
2982 if (CONST_INT_P (new_rtx))
2983 {
2984 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2985 new_rtx, GET_MODE (XEXP (x, 0)));
2986 gcc_assert (x);
2987 }
2988 else
2989 XEXP (x, 0) = new_rtx;
2990
2991 return x;
2992 }
2993
2994 fmt = GET_RTX_FORMAT (GET_CODE (x));
2995 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2996 {
2997 if (fmt[i] == 'e')
2998 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
2999 else if (fmt[i] == 'E')
3000 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3001 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3002 from, to, all_regs);
3003 }
3004
3005 return x;
3006 }
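
/* Illustrative sketch (hypothetical): substitute a known constant VAL for
   register REG in a copy of PATTERN.  Since replace_rtx modifies its
   argument in place, the caller copies first when the original is shared.  */

static rtx
example_substitute_constant (rtx pattern, rtx reg, rtx val)
{
  return replace_rtx (copy_rtx (pattern), reg, val, false);
}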
3007 \f
3008 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3009 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3010
3011 void
3012 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3013 {
3014 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3015 rtx x = *loc;
3016 if (JUMP_TABLE_DATA_P (x))
3017 {
3018 x = PATTERN (x);
3019 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3020 int len = GET_NUM_ELEM (vec);
3021 for (int i = 0; i < len; ++i)
3022 {
3023 rtx ref = RTVEC_ELT (vec, i);
3024 if (XEXP (ref, 0) == old_label)
3025 {
3026 XEXP (ref, 0) = new_label;
3027 if (update_label_nuses)
3028 {
3029 ++LABEL_NUSES (new_label);
3030 --LABEL_NUSES (old_label);
3031 }
3032 }
3033 }
3034 return;
3035 }
3036
3037 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3038 field. This is not handled by the iterator because it doesn't
3039 handle unprinted ('0') fields. */
3040 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3041 JUMP_LABEL (x) = new_label;
3042
3043 subrtx_ptr_iterator::array_type array;
3044 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3045 {
3046 rtx *loc = *iter;
3047 if (rtx x = *loc)
3048 {
3049 if (GET_CODE (x) == SYMBOL_REF
3050 && CONSTANT_POOL_ADDRESS_P (x))
3051 {
3052 rtx c = get_pool_constant (x);
3053 if (rtx_referenced_p (old_label, c))
3054 {
3055 /* Create a copy of constant C; replace the label inside
3056 but do not update LABEL_NUSES because uses in constant pool
3057 are not counted. */
3058 rtx new_c = copy_rtx (c);
3059 replace_label (&new_c, old_label, new_label, false);
3060
3061 /* Add the new constant NEW_C to constant pool and replace
3062 the old reference to constant by new reference. */
3063 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3064 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3065 }
3066 }
3067
3068 if ((GET_CODE (x) == LABEL_REF
3069 || GET_CODE (x) == INSN_LIST)
3070 && XEXP (x, 0) == old_label)
3071 {
3072 XEXP (x, 0) = new_label;
3073 if (update_label_nuses)
3074 {
3075 ++LABEL_NUSES (new_label);
3076 --LABEL_NUSES (old_label);
3077 }
3078 }
3079 }
3080 }
3081 }
3082
3083 void
3084 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3085 rtx_insn *new_label, bool update_label_nuses)
3086 {
3087 rtx insn_as_rtx = insn;
3088 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3089 gcc_checking_assert (insn_as_rtx == insn);
3090 }
3091
3092 /* Return true if X is referenced in BODY. */
3093
3094 bool
3095 rtx_referenced_p (const_rtx x, const_rtx body)
3096 {
3097 subrtx_iterator::array_type array;
3098 FOR_EACH_SUBRTX (iter, array, body, ALL)
3099 if (const_rtx y = *iter)
3100 {
3101 /* Check if a label_ref Y refers to label X. */
3102 if (GET_CODE (y) == LABEL_REF
3103 && LABEL_P (x)
3104 && label_ref_label (y) == x)
3105 return true;
3106
3107 if (rtx_equal_p (x, y))
3108 return true;
3109
3110 /* If Y is a reference to a pool constant, traverse the constant. */
3111 if (GET_CODE (y) == SYMBOL_REF
3112 && CONSTANT_POOL_ADDRESS_P (y))
3113 iter.substitute (get_pool_constant (y));
3114 }
3115 return false;
3116 }
3117
3118 /* If INSN is a tablejump, return true and store the label (which precedes the
3119 jump table) in *LABELP and the jump table in *TABLEP. LABELP and TABLEP may be NULL. */
3120
3121 bool
3122 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3123 rtx_jump_table_data **tablep)
3124 {
3125 if (!JUMP_P (insn))
3126 return false;
3127
3128 rtx target = JUMP_LABEL (insn);
3129 if (target == NULL_RTX || ANY_RETURN_P (target))
3130 return false;
3131
3132 rtx_insn *label = as_a<rtx_insn *> (target);
3133 rtx_insn *table = next_insn (label);
3134 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3135 return false;
3136
3137 if (labelp)
3138 *labelp = label;
3139 if (tablep)
3140 *tablep = as_a <rtx_jump_table_data *> (table);
3141 return true;
3142 }
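
/* Illustrative sketch (hypothetical): count the entries of the jump table
   driven by INSN, or return -1 if INSN is not a tablejump.  */

static int
example_jump_table_length (const rtx_insn *insn)
{
  rtx_jump_table_data *table;
  if (! tablejump_p (insn, NULL, &table))
    return -1;
  return GET_NUM_ELEM (table->get_labels ());
}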
3143
3144 /* A subroutine of computed_jump_p. Return 1 if X contains a REG or MEM or
3145 constant that is not in the constant pool and is not in the condition
3146 of an IF_THEN_ELSE. */
3147
3148 static int
3149 computed_jump_p_1 (const_rtx x)
3150 {
3151 const enum rtx_code code = GET_CODE (x);
3152 int i, j;
3153 const char *fmt;
3154
3155 switch (code)
3156 {
3157 case LABEL_REF:
3158 case PC:
3159 return 0;
3160
3161 case CONST:
3162 CASE_CONST_ANY:
3163 case SYMBOL_REF:
3164 case REG:
3165 return 1;
3166
3167 case MEM:
3168 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3169 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3170
3171 case IF_THEN_ELSE:
3172 return (computed_jump_p_1 (XEXP (x, 1))
3173 || computed_jump_p_1 (XEXP (x, 2)));
3174
3175 default:
3176 break;
3177 }
3178
3179 fmt = GET_RTX_FORMAT (code);
3180 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3181 {
3182 if (fmt[i] == 'e'
3183 && computed_jump_p_1 (XEXP (x, i)))
3184 return 1;
3185
3186 else if (fmt[i] == 'E')
3187 for (j = 0; j < XVECLEN (x, i); j++)
3188 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3189 return 1;
3190 }
3191
3192 return 0;
3193 }
3194
3195 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3196
3197 Tablejumps and casesi insns are not considered indirect jumps;
3198 we can recognize them by a (use (label_ref)). */
3199
3200 int
3201 computed_jump_p (const rtx_insn *insn)
3202 {
3203 int i;
3204 if (JUMP_P (insn))
3205 {
3206 rtx pat = PATTERN (insn);
3207
3208 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3209 if (JUMP_LABEL (insn) != NULL)
3210 return 0;
3211
3212 if (GET_CODE (pat) == PARALLEL)
3213 {
3214 int len = XVECLEN (pat, 0);
3215 int has_use_labelref = 0;
3216
3217 for (i = len - 1; i >= 0; i--)
3218 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3219 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3220 == LABEL_REF))
3221 {
3222 has_use_labelref = 1;
3223 break;
3224 }
3225
3226 if (! has_use_labelref)
3227 for (i = len - 1; i >= 0; i--)
3228 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3229 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3230 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3231 return 1;
3232 }
3233 else if (GET_CODE (pat) == SET
3234 && SET_DEST (pat) == pc_rtx
3235 && computed_jump_p_1 (SET_SRC (pat)))
3236 return 1;
3237 }
3238 return 0;
3239 }
3240
3241 \f
3242
3243 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3244 the equivalent add insn and pass the result to FN, using DATA as the
3245 final argument. */
3246
3247 static int
3248 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3249 {
3250 rtx x = XEXP (mem, 0);
3251 switch (GET_CODE (x))
3252 {
3253 case PRE_INC:
3254 case POST_INC:
3255 {
3256 int size = GET_MODE_SIZE (GET_MODE (mem));
3257 rtx r1 = XEXP (x, 0);
3258 rtx c = gen_int_mode (size, GET_MODE (r1));
3259 return fn (mem, x, r1, r1, c, data);
3260 }
3261
3262 case PRE_DEC:
3263 case POST_DEC:
3264 {
3265 int size = GET_MODE_SIZE (GET_MODE (mem));
3266 rtx r1 = XEXP (x, 0);
3267 rtx c = gen_int_mode (-size, GET_MODE (r1));
3268 return fn (mem, x, r1, r1, c, data);
3269 }
3270
3271 case PRE_MODIFY:
3272 case POST_MODIFY:
3273 {
3274 rtx r1 = XEXP (x, 0);
3275 rtx add = XEXP (x, 1);
3276 return fn (mem, x, r1, add, NULL, data);
3277 }
3278
3279 default:
3280 gcc_unreachable ();
3281 }
3282 }
3283
3284 /* Traverse X looking for MEMs that have autoinc addresses.
3285 For each such autoinc operation found, call FN, passing it
3286 the innermost enclosing MEM, the operation itself, the RTX modified
3287 by the operation, two RTXs (the second may be NULL) that, once
3288 added, represent the value to be held by the modified RTX
3289 afterwards, and DATA. FN is to return 0 to continue the
3290 traversal or any other value to have it returned to the caller of
3291 for_each_inc_dec. */
3292
3293 int
3294 for_each_inc_dec (rtx x,
3295 for_each_inc_dec_fn fn,
3296 void *data)
3297 {
3298 subrtx_var_iterator::array_type array;
3299 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3300 {
3301 rtx mem = *iter;
3302 if (mem
3303 && MEM_P (mem)
3304 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3305 {
3306 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3307 if (res != 0)
3308 return res;
3309 iter.skip_subrtxes ();
3310 }
3311 }
3312 return 0;
3313 }
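
/* Illustrative sketch (hypothetical): a for_each_inc_dec_fn callback that
   simply counts the autoinc addresses found.  The parameter list follows
   the contract described above; DATA points to an int counter, e.g.

     int count = 0;
     for_each_inc_dec (PATTERN (insn), example_count_autoinc, &count);  */

static int
example_count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  /* Returning zero continues the traversal.  */
  return 0;
}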
3314
3315 \f
3316 /* Searches X for any reference to REGNO, returning the rtx of the
3317 reference found if any. Otherwise, returns NULL_RTX. */
3318
3319 rtx
3320 regno_use_in (unsigned int regno, rtx x)
3321 {
3322 const char *fmt;
3323 int i, j;
3324 rtx tem;
3325
3326 if (REG_P (x) && REGNO (x) == regno)
3327 return x;
3328
3329 fmt = GET_RTX_FORMAT (GET_CODE (x));
3330 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3331 {
3332 if (fmt[i] == 'e')
3333 {
3334 if ((tem = regno_use_in (regno, XEXP (x, i))))
3335 return tem;
3336 }
3337 else if (fmt[i] == 'E')
3338 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3339 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3340 return tem;
3341 }
3342
3343 return NULL_RTX;
3344 }
3345
3346 /* Return a value indicating whether OP, an operand of a commutative
3347 operation, is preferred as the first or second operand. The more
3348 positive the value, the stronger the preference for being the first
3349 operand. */
3350
3351 int
3352 commutative_operand_precedence (rtx op)
3353 {
3354 enum rtx_code code = GET_CODE (op);
3355
3356 /* Constants always become the second operand. Prefer "nice" constants. */
3357 if (code == CONST_INT)
3358 return -8;
3359 if (code == CONST_WIDE_INT)
3360 return -7;
3361 if (code == CONST_DOUBLE)
3362 return -7;
3363 if (code == CONST_FIXED)
3364 return -7;
3365 op = avoid_constant_pool_reference (op);
3366 code = GET_CODE (op);
3367
3368 switch (GET_RTX_CLASS (code))
3369 {
3370 case RTX_CONST_OBJ:
3371 if (code == CONST_INT)
3372 return -6;
3373 if (code == CONST_WIDE_INT)
3374 return -6;
3375 if (code == CONST_DOUBLE)
3376 return -5;
3377 if (code == CONST_FIXED)
3378 return -5;
3379 return -4;
3380
3381 case RTX_EXTRA:
3382 /* SUBREGs of objects should come second. */
3383 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3384 return -3;
3385 return 0;
3386
3387 case RTX_OBJ:
3388 /* Complex expressions should come first, so decrease the priority
3389 of objects. Prefer pointer objects over non-pointer objects. */
3390 if ((REG_P (op) && REG_POINTER (op))
3391 || (MEM_P (op) && MEM_POINTER (op)))
3392 return -1;
3393 return -2;
3394
3395 case RTX_COMM_ARITH:
3396 /* Prefer operands that are themselves commutative to be first.
3397 This helps to make things linear. In particular,
3398 (and (and (reg) (reg)) (not (reg))) is canonical. */
3399 return 4;
3400
3401 case RTX_BIN_ARITH:
3402 /* If only one operand is a binary expression, it will be the first
3403 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3404 is canonical, although it will usually be further simplified. */
3405 return 2;
3406
3407 case RTX_UNARY:
3408 /* Then prefer NEG and NOT. */
3409 if (code == NEG || code == NOT)
3410 return 1;
3411 /* FALLTHRU */
3412
3413 default:
3414 return 0;
3415 }
3416 }
3417
3418 /* Return 1 iff it is necessary to swap operands of commutative operation
3419 in order to canonicalize expression. */
3420
3421 bool
3422 swap_commutative_operands_p (rtx x, rtx y)
3423 {
3424 return (commutative_operand_precedence (x)
3425 < commutative_operand_precedence (y));
3426 }
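
/* Illustrative sketch (hypothetical): put the operands of a commutative
   operation into canonical order before building the expression, in the
   way simplify-rtx-style code does.  */

static void
example_canonicalize_comm_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    {
      rtx tmp = *op0;
      *op0 = *op1;
      *op1 = tmp;
    }
}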
3427
3428 /* Return 1 if X is an autoincrement side effect and the register is
3429 not the stack pointer. */
3430 int
3431 auto_inc_p (const_rtx x)
3432 {
3433 switch (GET_CODE (x))
3434 {
3435 case PRE_INC:
3436 case POST_INC:
3437 case PRE_DEC:
3438 case POST_DEC:
3439 case PRE_MODIFY:
3440 case POST_MODIFY:
3441 /* There are no REG_INC notes for SP. */
3442 if (XEXP (x, 0) != stack_pointer_rtx)
3443 return 1;
3444 default:
3445 break;
3446 }
3447 return 0;
3448 }
3449
3450 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3451 int
3452 loc_mentioned_in_p (rtx *loc, const_rtx in)
3453 {
3454 enum rtx_code code;
3455 const char *fmt;
3456 int i, j;
3457
3458 if (!in)
3459 return 0;
3460
3461 code = GET_CODE (in);
3462 fmt = GET_RTX_FORMAT (code);
3463 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3464 {
3465 if (fmt[i] == 'e')
3466 {
3467 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3468 return 1;
3469 }
3470 else if (fmt[i] == 'E')
3471 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3472 if (loc == &XVECEXP (in, i, j)
3473 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3474 return 1;
3475 }
3476 return 0;
3477 }
3478
3479 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3480 and SUBREG_BYTE, return the bit offset where the subreg begins
3481 (counting from the least significant bit of the operand). */
3482
3483 unsigned int
3484 subreg_lsb_1 (machine_mode outer_mode,
3485 machine_mode inner_mode,
3486 unsigned int subreg_byte)
3487 {
3488 unsigned int bitpos;
3489 unsigned int byte;
3490 unsigned int word;
3491
3492 /* A paradoxical subreg begins at bit position 0. */
3493 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3494 return 0;
3495
3496 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3497 /* If the subreg crosses a word boundary ensure that
3498 it also begins and ends on a word boundary. */
3499 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3500 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3501 && (subreg_byte % UNITS_PER_WORD
3502 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3503
3504 if (WORDS_BIG_ENDIAN)
3505 word = (GET_MODE_SIZE (inner_mode)
3506 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3507 else
3508 word = subreg_byte / UNITS_PER_WORD;
3509 bitpos = word * BITS_PER_WORD;
3510
3511 if (BYTES_BIG_ENDIAN)
3512 byte = (GET_MODE_SIZE (inner_mode)
3513 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3514 else
3515 byte = subreg_byte % UNITS_PER_WORD;
3516 bitpos += byte * BITS_PER_UNIT;
3517
3518 return bitpos;
3519 }
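
/* Worked example for the function above (a sketch, assuming a target where
   WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN agree): for (subreg:SI (reg:DI R) 4),
   subreg_lsb_1 (SImode, DImode, 4) yields 32 on a little-endian target (the
   subreg is the high half of R) and 0 on a big-endian target (the low half),
   whether UNITS_PER_WORD is 4 or 8.  */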
3520
3521 /* Given a subreg X, return the bit offset where the subreg begins
3522 (counting from the least significant bit of the reg). */
3523
3524 unsigned int
3525 subreg_lsb (const_rtx x)
3526 {
3527 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3528 SUBREG_BYTE (x));
3529 }
3530
3531 /* Return the subreg byte offset for a subreg whose outer value has
3532 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3533 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3534 lsb of the inner value. This is the inverse of the calculation
3535 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3536
3537 unsigned int
3538 subreg_size_offset_from_lsb (unsigned int outer_bytes,
3539 unsigned int inner_bytes,
3540 unsigned int lsb_shift)
3541 {
3542 /* A paradoxical subreg begins at bit position 0. */
3543 if (outer_bytes > inner_bytes)
3544 {
3545 gcc_checking_assert (lsb_shift == 0);
3546 return 0;
3547 }
3548
3549 gcc_assert (lsb_shift % BITS_PER_UNIT == 0);
3550 unsigned int lower_bytes = lsb_shift / BITS_PER_UNIT;
3551 unsigned int upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3552 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3553 return upper_bytes;
3554 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3555 return lower_bytes;
3556 else
3557 {
3558 unsigned int lower_word_part = lower_bytes & -UNITS_PER_WORD;
3559 unsigned int upper_word_part = upper_bytes & -UNITS_PER_WORD;
3560 if (WORDS_BIG_ENDIAN)
3561 return upper_word_part + (lower_bytes - lower_word_part);
3562 else
3563 return lower_word_part + (upper_bytes - upper_word_part);
3564 }
3565 }
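
/* Worked example for the inverse mapping (a sketch under the same
   assumptions as the subreg_lsb_1 example above):
   subreg_size_offset_from_lsb (4, 8, 32), i.e. an SImode value taken
   32 bits above the lsb of a DImode one, gives SUBREG_BYTE 4 on a
   little-endian target and 0 on a big-endian target.  */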
3566
3567 /* Fill in information about a subreg of a hard register.
3568 xregno - A regno of an inner hard subreg_reg (or what will become one).
3569 xmode - The mode of xregno.
3570 offset - The byte offset.
3571 ymode - The mode of a top level SUBREG (or what may become one).
3572 info - Pointer to structure to fill in.
3573
3574 Rather than considering one particular inner register (and thus one
3575 particular "outer" register) in isolation, this function really uses
3576 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3577 function does not check whether adding INFO->offset to XREGNO gives
3578 a valid hard register; even if INFO->offset + XREGNO is out of range,
3579 there might be another register of the same type that is in range.
3580 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3581 register, since that can depend on things like whether the final
3582 register number is even or odd. Callers that want to check whether
3583 this particular subreg can be replaced by a simple (reg ...) should
3584 use simplify_subreg_regno. */
3585
3586 void
3587 subreg_get_info (unsigned int xregno, machine_mode xmode,
3588 unsigned int offset, machine_mode ymode,
3589 struct subreg_info *info)
3590 {
3591 int nregs_xmode, nregs_ymode;
3592 int mode_multiple, nregs_multiple;
3593 int offset_adj, y_offset, y_offset_adj;
3594 int regsize_xmode, regsize_ymode;
3595 bool rknown;
3596
3597 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3598
3599 rknown = false;
3600
3601 /* If there are holes in a non-scalar mode in registers, we expect
3602 that it is made up of its units concatenated together. */
3603 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3604 {
3605 machine_mode xmode_unit;
3606
3607 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3608 xmode_unit = GET_MODE_INNER (xmode);
3609 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3610 gcc_assert (nregs_xmode
3611 == (GET_MODE_NUNITS (xmode)
3612 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3613 gcc_assert (hard_regno_nregs[xregno][xmode]
3614 == (hard_regno_nregs[xregno][xmode_unit]
3615 * GET_MODE_NUNITS (xmode)));
3616
3617 /* You can only ask for a SUBREG of a value with holes in the middle
3618 if you don't cross the holes. (Such a SUBREG should be done by
3619 picking a different register class, or doing it in memory if
3620 necessary.) An example of a value with holes is XCmode on 32-bit
3621 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3622 3 for each part, but in memory it's two 128-bit parts.
3623 Padding is assumed to be at the end (not necessarily the 'high part')
3624 of each unit. */
3625 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3626 < GET_MODE_NUNITS (xmode))
3627 && (offset / GET_MODE_SIZE (xmode_unit)
3628 != ((offset + GET_MODE_SIZE (ymode) - 1)
3629 / GET_MODE_SIZE (xmode_unit))))
3630 {
3631 info->representable_p = false;
3632 rknown = true;
3633 }
3634 }
3635 else
3636 nregs_xmode = hard_regno_nregs[xregno][xmode];
3637
3638 nregs_ymode = hard_regno_nregs[xregno][ymode];
3639
3640 /* Paradoxical subregs are otherwise valid. */
3641 if (!rknown
3642 && offset == 0
3643 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3644 {
3645 info->representable_p = true;
3646 /* If this is a big endian paradoxical subreg, which uses more
3647 actual hard registers than the original register, we must
3648 return a negative offset so that we find the proper highpart
3649 of the register. */
3650 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3651 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3652 info->offset = nregs_xmode - nregs_ymode;
3653 else
3654 info->offset = 0;
3655 info->nregs = nregs_ymode;
3656 return;
3657 }
3658
3659 /* If registers store different numbers of bits in the different
3660 modes, we cannot generally form this subreg. */
3661 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3662 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3663 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3664 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3665 {
3666 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3667 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3668 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3669 {
3670 info->representable_p = false;
3671 info->nregs
3672 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3673 info->offset = offset / regsize_xmode;
3674 return;
3675 }
3676 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3677 {
3678 info->representable_p = false;
3679 info->nregs
3680 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3681 info->offset = offset / regsize_xmode;
3682 return;
3683 }
3684 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3685 would go outside of XMODE. */
3686 if (!rknown
3687 && GET_MODE_SIZE (ymode) + offset > GET_MODE_SIZE (xmode))
3688 {
3689 info->representable_p = false;
3690 info->nregs = nregs_ymode;
3691 info->offset = offset / regsize_xmode;
3692 return;
3693 }
3694 /* Quick exit for the simple and common case of extracting whole
3695 subregisters from a multiregister value. */
3696 /* ??? It would be better to integrate this into the code below,
3697 if we can generalize the concept enough and figure out how
3698 odd-sized modes can coexist with the other weird cases we support. */
3699 if (!rknown
3700 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3701 && regsize_xmode == regsize_ymode
3702 && (offset % regsize_ymode) == 0)
3703 {
3704 info->representable_p = true;
3705 info->nregs = nregs_ymode;
3706 info->offset = offset / regsize_ymode;
3707 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3708 return;
3709 }
3710 }
3711
3712 /* Lowpart subregs are otherwise valid. */
3713 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3714 {
3715 info->representable_p = true;
3716 rknown = true;
3717
3718 if (offset == 0 || nregs_xmode == nregs_ymode)
3719 {
3720 info->offset = 0;
3721 info->nregs = nregs_ymode;
3722 return;
3723 }
3724 }
3725
3726 /* This should always pass, otherwise we don't know how to verify
3727 the constraint. These conditions may be relaxed but
3728 subreg_regno_offset would need to be redesigned. */
3729 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3730 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3731
3732 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3733 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3734 {
3735 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3736 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3737 HOST_WIDE_INT off_low = offset & (ysize - 1);
3738 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3739 offset = (xsize - ysize - off_high) | off_low;
3740 }
3741 /* The XMODE value can be seen as a vector of NREGS_XMODE
3742 values. The subreg must represent a lowpart of the given field.
3743 Compute which field it is. */
3744 offset_adj = offset;
3745 offset_adj -= subreg_lowpart_offset (ymode,
3746 mode_for_size (GET_MODE_BITSIZE (xmode)
3747 / nregs_xmode,
3748 MODE_INT, 0));
3749
3750 /* Size of ymode must not be greater than the size of xmode. */
3751 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3752 gcc_assert (mode_multiple != 0);
3753
3754 y_offset = offset / GET_MODE_SIZE (ymode);
3755 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3756 nregs_multiple = nregs_xmode / nregs_ymode;
3757
3758 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3759 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3760
3761 if (!rknown)
3762 {
3763 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3764 rknown = true;
3765 }
3766 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3767 info->nregs = nregs_ymode;
3768 }
3769
3770 /* This function returns the regno offset of a subreg expression.
3771 xregno - A regno of an inner hard subreg_reg (or what will become one).
3772 xmode - The mode of xregno.
3773 offset - The byte offset.
3774 ymode - The mode of a top level SUBREG (or what may become one).
3775 RETURN - The regno offset which would be used. */
3776 unsigned int
3777 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3778 unsigned int offset, machine_mode ymode)
3779 {
3780 struct subreg_info info;
3781 subreg_get_info (xregno, xmode, offset, ymode, &info);
3782 return info.offset;
3783 }
3784
3785 /* This function returns true when the offset is representable via
3786 subreg_offset in the given regno.
3787 xregno - A regno of an inner hard subreg_reg (or what will become one).
3788 xmode - The mode of xregno.
3789 offset - The byte offset.
3790 ymode - The mode of a top level SUBREG (or what may become one).
3791 RETURN - Whether the offset is representable. */
3792 bool
3793 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3794 unsigned int offset, machine_mode ymode)
3795 {
3796 struct subreg_info info;
3797 subreg_get_info (xregno, xmode, offset, ymode, &info);
3798 return info.representable_p;
3799 }
3800
3801 /* Return the number of a YMODE register to which
3802
3803 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3804
3805 can be simplified. Return -1 if the subreg can't be simplified.
3806
3807 XREGNO is a hard register number. */
3808
3809 int
3810 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3811 unsigned int offset, machine_mode ymode)
3812 {
3813 struct subreg_info info;
3814 unsigned int yregno;
3815
3816 #ifdef CANNOT_CHANGE_MODE_CLASS
3817 /* Give the backend a chance to disallow the mode change. */
3818 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3819 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3820 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3821 /* We can use mode change in LRA for some transformations. */
3822 && ! lra_in_progress)
3823 return -1;
3824 #endif
3825
3826 /* We shouldn't simplify stack-related registers. */
3827 if ((!reload_completed || frame_pointer_needed)
3828 && xregno == FRAME_POINTER_REGNUM)
3829 return -1;
3830
3831 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3832 && xregno == ARG_POINTER_REGNUM)
3833 return -1;
3834
3835 if (xregno == STACK_POINTER_REGNUM
3836 /* We should convert hard stack register in LRA if it is
3837 possible. */
3838 && ! lra_in_progress)
3839 return -1;
3840
3841 /* Try to get the register offset. */
3842 subreg_get_info (xregno, xmode, offset, ymode, &info);
3843 if (!info.representable_p)
3844 return -1;
3845
3846 /* Make sure that the offsetted register value is in range. */
3847 yregno = xregno + info.offset;
3848 if (!HARD_REGISTER_NUM_P (yregno))
3849 return -1;
3850
3851 /* See whether (reg:YMODE YREGNO) is valid.
3852
3853 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3854 This is a kludge to work around how complex FP arguments are passed
3855 on IA-64 and should be fixed. See PR target/49226. */
3856 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3857 && HARD_REGNO_MODE_OK (xregno, xmode))
3858 return -1;
3859
3860 return (int) yregno;
3861 }
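
/* Illustrative sketch (hypothetical): rewrite a hard-register SUBREG as a
   plain REG when simplify_subreg_regno says that is valid.  */

static rtx
example_fold_hard_subreg (rtx x)
{
  gcc_assert (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)));
  int regno = simplify_subreg_regno (REGNO (SUBREG_REG (x)),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x), GET_MODE (x));
  return regno >= 0 ? gen_rtx_REG (GET_MODE (x), regno) : NULL_RTX;
}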
3862
3863 /* Return the final regno that a subreg expression refers to. */
3864 unsigned int
3865 subreg_regno (const_rtx x)
3866 {
3867 unsigned int ret;
3868 rtx subreg = SUBREG_REG (x);
3869 int regno = REGNO (subreg);
3870
3871 ret = regno + subreg_regno_offset (regno,
3872 GET_MODE (subreg),
3873 SUBREG_BYTE (x),
3874 GET_MODE (x));
3875 return ret;
3876
3877 }
3878
3879 /* Return the number of registers that a subreg expression refers
3880 to. */
3881 unsigned int
3882 subreg_nregs (const_rtx x)
3883 {
3884 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3885 }
3886
3887 /* Return the number of registers that the subreg expression X, whose
3888 inner register has number REGNO, refers to. This is a copy of
3889 rtlanal.c:subreg_nregs, changed so that the regno can be passed in. */
3890
3891 unsigned int
3892 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3893 {
3894 struct subreg_info info;
3895 rtx subreg = SUBREG_REG (x);
3896
3897 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3898 &info);
3899 return info.nregs;
3900 }
3901
3902 struct parms_set_data
3903 {
3904 int nregs;
3905 HARD_REG_SET regs;
3906 };
3907
3908 /* Helper function for noticing stores to parameter registers. */
3909 static void
3910 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3911 {
3912 struct parms_set_data *const d = (struct parms_set_data *) data;
3913 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3914 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3915 {
3916 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3917 d->nregs--;
3918 }
3919 }
3920
3921 /* Look backward for the first parameter to be loaded.
3922 Note that loads of all parameters will not necessarily be
3923 found if CSE has eliminated some of them (e.g., when an argument
3924 to the outer function is passed down as a parameter).
3925 Do not skip BOUNDARY. */
3926 rtx_insn *
3927 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3928 {
3929 struct parms_set_data parm;
3930 rtx p;
3931 rtx_insn *before, *first_set;
3932
3933 /* Since different machines initialize their parameter registers
3934 in different orders, assume nothing. Collect the set of all
3935 parameter registers. */
3936 CLEAR_HARD_REG_SET (parm.regs);
3937 parm.nregs = 0;
3938 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3939 if (GET_CODE (XEXP (p, 0)) == USE
3940 && REG_P (XEXP (XEXP (p, 0), 0))
3941 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
3942 {
3943 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3944
3945 /* We only care about registers which can hold function
3946 arguments. */
3947 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3948 continue;
3949
3950 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3951 parm.nregs++;
3952 }
3953 before = call_insn;
3954 first_set = call_insn;
3955
3956 /* Search backward for the first set of a register in this set. */
3957 while (parm.nregs && before != boundary)
3958 {
3959 before = PREV_INSN (before);
3960
3961 /* It is possible that some loads got CSEed from one call to
3962 another. Stop in that case. */
3963 if (CALL_P (before))
3964 break;
3965
3966 /* Our caller must either ensure that we will find all sets
3967 (in case the code has not been optimized yet), or take care
3968 of possible labels by setting BOUNDARY to a preceding
3969 CODE_LABEL. */
3970 if (LABEL_P (before))
3971 {
3972 gcc_assert (before == boundary);
3973 break;
3974 }
3975
3976 if (INSN_P (before))
3977 {
3978 int nregs_old = parm.nregs;
3979 note_stores (PATTERN (before), parms_set, &parm);
3980 /* If we found something that did not set a parameter reg,
3981 we're done. Do not keep going, as that might result
3982 in hoisting an insn before the setting of a pseudo
3983 that is used by the hoisted insn. */
3984 if (nregs_old != parm.nregs)
3985 first_set = before;
3986 else
3987 break;
3988 }
3989 }
3990 return first_set;
3991 }
3992
3993 /* Return true if we should avoid inserting code between INSN and preceding
3994 call instruction. */
3995
3996 bool
3997 keep_with_call_p (const rtx_insn *insn)
3998 {
3999 rtx set;
4000
4001 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4002 {
4003 if (REG_P (SET_DEST (set))
4004 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4005 && fixed_regs[REGNO (SET_DEST (set))]
4006 && general_operand (SET_SRC (set), VOIDmode))
4007 return true;
4008 if (REG_P (SET_SRC (set))
4009 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4010 && REG_P (SET_DEST (set))
4011 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4012 return true;
4013 /* There may be a stack pop just after the call and before the store
4014 of the return register. Search for the actual store when deciding
4015 if we can break or not. */
4016 if (SET_DEST (set) == stack_pointer_rtx)
4017 {
4018 /* This CONST_CAST is okay because next_nonnote_insn just
4019 returns its argument and we assign it to a const_rtx
4020 variable. */
4021 const rtx_insn *i2
4022 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4023 if (i2 && keep_with_call_p (i2))
4024 return true;
4025 }
4026 }
4027 return false;
4028 }
4029
4030 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4031 to non-complex jumps. That is, direct unconditional, conditional,
4032 and tablejumps, but not computed jumps or returns. It also does
4033 not apply to the fallthru case of a conditional jump. */
4034
4035 bool
4036 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4037 {
4038 rtx tmp = JUMP_LABEL (jump_insn);
4039 rtx_jump_table_data *table;
4040
4041 if (label == tmp)
4042 return true;
4043
4044 if (tablejump_p (jump_insn, NULL, &table))
4045 {
4046 rtvec vec = table->get_labels ();
4047 int i, veclen = GET_NUM_ELEM (vec);
4048
4049 for (i = 0; i < veclen; ++i)
4050 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4051 return true;
4052 }
4053
4054 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4055 return true;
4056
4057 return false;
4058 }
4059
4060 \f
4061 /* Return an estimate of the cost of computing rtx X.
4062 One use is in cse, to decide which expression to keep in the hash table.
4063 Another is in rtl generation, to pick the cheapest way to multiply.
4064 Other uses like the latter are expected in the future.
4065
4066 X appears as operand OPNO in an expression with code OUTER_CODE.
4067 SPEED specifies whether costs optimized for speed or size should
4068 be returned. */
4069
4070 int
4071 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4072 int opno, bool speed)
4073 {
4074 int i, j;
4075 enum rtx_code code;
4076 const char *fmt;
4077 int total;
4078 int factor;
4079
4080 if (x == 0)
4081 return 0;
4082
4083 if (GET_MODE (x) != VOIDmode)
4084 mode = GET_MODE (x);
4085
4086 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4087 many insns, taking N times as long. */
4088 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4089 if (factor == 0)
4090 factor = 1;
4091
4092 /* Compute the default costs of certain things.
4093 Note that targetm.rtx_costs can override the defaults. */
4094
4095 code = GET_CODE (x);
4096 switch (code)
4097 {
4098 case MULT:
4099 /* Multiplication has time-complexity O(N*N), where N is the
4100 number of units (translated from digits) when using
4101 schoolbook long multiplication. */
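/* As a rough illustration (assuming 4-byte words): a DImode multiply has
   factor 2, so its default cost is 2 * 2 * COSTS_N_INSNS (5). Targets
   normally refine such values through targetm.rtx_costs. */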
4102 total = factor * factor * COSTS_N_INSNS (5);
4103 break;
4104 case DIV:
4105 case UDIV:
4106 case MOD:
4107 case UMOD:
4108 /* Similarly, complexity for schoolbook long division. */
4109 total = factor * factor * COSTS_N_INSNS (7);
4110 break;
4111 case USE:
4112 /* Used in combine.c as a marker. */
4113 total = 0;
4114 break;
4115 case SET:
4116 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4117 the mode for the factor. */
4118 mode = GET_MODE (SET_DEST (x));
4119 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4120 if (factor == 0)
4121 factor = 1;
4122 /* FALLTHRU */
4123 default:
4124 total = factor * COSTS_N_INSNS (1);
4125 }
4126
4127 switch (code)
4128 {
4129 case REG:
4130 return 0;
4131
4132 case SUBREG:
4133 total = 0;
4134 /* If we can't tie these modes, make this expensive. The larger
4135 the mode, the more expensive it is. */
4136 if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4137 return COSTS_N_INSNS (2 + factor);
4138 break;
4139
4140 default:
4141 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4142 return total;
4143 break;
4144 }
4145
4146 /* Sum the costs of the sub-rtx's, plus the cost of this operation,
4147 which is already in TOTAL. */
4148
4149 fmt = GET_RTX_FORMAT (code);
4150 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4151 if (fmt[i] == 'e')
4152 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4153 else if (fmt[i] == 'E')
4154 for (j = 0; j < XVECLEN (x, i); j++)
4155 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4156
4157 return total;
4158 }
4159
4160 /* Fill in the structure C with information about both speed and size rtx
4161 costs for X, which is operand OPNO in an expression with code OUTER. */
4162
4163 void
4164 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4165 struct full_rtx_costs *c)
4166 {
4167 c->speed = rtx_cost (x, mode, outer, opno, true);
4168 c->size = rtx_cost (x, mode, outer, opno, false);
4169 }
4170
4171 \f
4172 /* Return the cost of address expression X.
4173 X is expected to be a properly formed address reference.
4174
4175 The SPEED parameter specifies whether costs optimized for speed or size
4176 should be returned. */
4177
4178 int
4179 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4180 {
4181 /* We may be asked for the cost of various unusual addresses, such as the
4182 operands of a push instruction. It is not worthwhile to complicate the
4183 target hook for such cases. */
4184
4185 if (!memory_address_addr_space_p (mode, x, as))
4186 return 1000;
4187
4188 return targetm.address_cost (x, mode, as, speed);
4189 }
4190
4191 /* If the target doesn't override, compute the cost as with arithmetic. */
4192
4193 int
4194 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4195 {
4196 return rtx_cost (x, Pmode, MEM, 0, speed);
4197 }
4198 \f
4199
4200 unsigned HOST_WIDE_INT
4201 nonzero_bits (const_rtx x, machine_mode mode)
4202 {
4203 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4204 }
4205
4206 unsigned int
4207 num_sign_bit_copies (const_rtx x, machine_mode mode)
4208 {
4209 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
4210 }
4211
4212 /* Return true if nonzero_bits1 might recurse into both operands
4213 of X. */
4214
4215 static inline bool
4216 nonzero_bits_binary_arith_p (const_rtx x)
4217 {
4218 if (!ARITHMETIC_P (x))
4219 return false;
4220 switch (GET_CODE (x))
4221 {
4222 case AND:
4223 case XOR:
4224 case IOR:
4225 case UMIN:
4226 case UMAX:
4227 case SMIN:
4228 case SMAX:
4229 case PLUS:
4230 case MINUS:
4231 case MULT:
4232 case DIV:
4233 case UDIV:
4234 case MOD:
4235 case UMOD:
4236 return true;
4237 default:
4238 return false;
4239 }
4240 }
4241
4242 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4243 It avoids exponential behavior in nonzero_bits1 when X has
4244 identical subexpressions on the first or the second level. */
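/* For example, given (plus X X), the first-level check below computes
   nonzero_bits for X once and passes it down as KNOWN_RET instead of
   recomputing it for both operands. */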
4245
4246 static unsigned HOST_WIDE_INT
4247 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4248 machine_mode known_mode,
4249 unsigned HOST_WIDE_INT known_ret)
4250 {
4251 if (x == known_x && mode == known_mode)
4252 return known_ret;
4253
4254 /* Try to find identical subexpressions. If found call
4255 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4256 precomputed value for the subexpression as KNOWN_RET. */
4257
4258 if (nonzero_bits_binary_arith_p (x))
4259 {
4260 rtx x0 = XEXP (x, 0);
4261 rtx x1 = XEXP (x, 1);
4262
4263 /* Check the first level. */
4264 if (x0 == x1)
4265 return nonzero_bits1 (x, mode, x0, mode,
4266 cached_nonzero_bits (x0, mode, known_x,
4267 known_mode, known_ret));
4268
4269 /* Check the second level. */
4270 if (nonzero_bits_binary_arith_p (x0)
4271 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4272 return nonzero_bits1 (x, mode, x1, mode,
4273 cached_nonzero_bits (x1, mode, known_x,
4274 known_mode, known_ret));
4275
4276 if (nonzero_bits_binary_arith_p (x1)
4277 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4278 return nonzero_bits1 (x, mode, x0, mode,
4279 cached_nonzero_bits (x0, mode, known_x,
4280 known_mode, known_ret));
4281 }
4282
4283 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4284 }
4285
4286 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4287 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4288 is less useful. We can't allow both, because that results in exponential
4289 run time recursion. There is a nullstone testcase that triggered
4290 this. This macro avoids accidental uses of num_sign_bit_copies. */
4291 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4292
4293 /* Given an expression, X, compute which bits in X can be nonzero.
4294 We don't care about bits outside of those defined in MODE.
4295
4296 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4297 an arithmetic operation, we can do better. */
4298
4299 static unsigned HOST_WIDE_INT
4300 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4301 machine_mode known_mode,
4302 unsigned HOST_WIDE_INT known_ret)
4303 {
4304 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4305 unsigned HOST_WIDE_INT inner_nz;
4306 enum rtx_code code;
4307 machine_mode inner_mode;
4308 unsigned int mode_width = GET_MODE_PRECISION (mode);
4309
4310 /* For floating-point and vector values, assume all bits are needed. */
4311 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4312 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4313 return nonzero;
4314
4315 /* If X is wider than MODE, use its mode instead. */
4316 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4317 {
4318 mode = GET_MODE (x);
4319 nonzero = GET_MODE_MASK (mode);
4320 mode_width = GET_MODE_PRECISION (mode);
4321 }
4322
4323 if (mode_width > HOST_BITS_PER_WIDE_INT)
4324 /* Our only callers in this case look for single bit values. So
4325 just return the mode mask. Those tests will then be false. */
4326 return nonzero;
4327
4328 /* If MODE is wider than X, but both fit within a single word on both the
4329 host and target machines, we can compute this from the bits of the
4330 object that might be nonzero in its own mode, taking into account the
4331 fact that, on many CISC machines, accessing an object in a wider mode
4332 causes the high-order bits to become undefined, so they are not known
4333 to be zero. */
4334
4335 if (!WORD_REGISTER_OPERATIONS
4336 && GET_MODE (x) != VOIDmode
4337 && GET_MODE (x) != mode
4338 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4339 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4340 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4341 {
4342 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4343 known_x, known_mode, known_ret);
4344 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4345 return nonzero;
4346 }
4347
4348 /* Please keep nonzero_bits_binary_arith_p above in sync with
4349 the code in the switch below. */
4350 code = GET_CODE (x);
4351 switch (code)
4352 {
4353 case REG:
4354 #if defined(POINTERS_EXTEND_UNSIGNED)
4355 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4356 all the bits above ptr_mode are known to be zero. */
4357 /* As we do not know which address space the pointer is referring to,
4358 we can do this only if the target does not support different pointer
4359 or address modes depending on the address space. */
4360 if (target_default_pointer_address_modes_p ()
4361 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4362 && REG_POINTER (x)
4363 && !targetm.have_ptr_extend ())
4364 nonzero &= GET_MODE_MASK (ptr_mode);
4365 #endif
4366
4367 /* Include declared information about alignment of pointers. */
4368 /* ??? We don't properly preserve REG_POINTER changes across
4369 pointer-to-integer casts, so we can't trust it except for
4370 things that we know must be pointers. See execute/960116-1.c. */
4371 if ((x == stack_pointer_rtx
4372 || x == frame_pointer_rtx
4373 || x == arg_pointer_rtx)
4374 && REGNO_POINTER_ALIGN (REGNO (x)))
4375 {
4376 unsigned HOST_WIDE_INT alignment
4377 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4378
4379 #ifdef PUSH_ROUNDING
4380 /* If PUSH_ROUNDING is defined, it is possible for the
4381 stack to be momentarily aligned only to that amount,
4382 so we pick the least alignment. */
4383 if (x == stack_pointer_rtx && PUSH_ARGS)
4384 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4385 alignment);
4386 #endif
4387
4388 nonzero &= ~(alignment - 1);
4389 }
4390
4391 {
4392 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4393 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4394 known_mode, known_ret,
4395 &nonzero_for_hook);
4396
4397 if (new_rtx)
4398 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4399 known_mode, known_ret);
4400
4401 return nonzero_for_hook;
4402 }
4403
4404 case CONST_INT:
4405 /* If X is negative in MODE, sign-extend the value. */
4406 if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
4407 && mode_width < BITS_PER_WORD
4408 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
4409 != 0)
4410 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4411
4412 return UINTVAL (x);
4413
4414 case MEM:
4415 /* In many, if not most, RISC machines, reading a byte from memory
4416 zeros the rest of the register. Noticing that fact saves a lot
4417 of extra zero-extends. */
4418 if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
4419 nonzero &= GET_MODE_MASK (GET_MODE (x));
4420 break;
4421
4422 case EQ: case NE:
4423 case UNEQ: case LTGT:
4424 case GT: case GTU: case UNGT:
4425 case LT: case LTU: case UNLT:
4426 case GE: case GEU: case UNGE:
4427 case LE: case LEU: case UNLE:
4428 case UNORDERED: case ORDERED:
4429 /* If this produces an integer result, we know which bits are set.
4430 Code here used to clear bits outside the mode of X, but that is
4431 now done above. */
4432 /* Mind that MODE is the mode the caller wants to look at this
4433 operation in, and not the actual operation mode. We can wind
4434 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4435 that describes the results of a vector compare. */
4436 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4437 && mode_width <= HOST_BITS_PER_WIDE_INT)
4438 nonzero = STORE_FLAG_VALUE;
4439 break;
4440
4441 case NEG:
4442 #if 0
4443 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4444 and num_sign_bit_copies. */
4445 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4446 == GET_MODE_PRECISION (GET_MODE (x)))
4447 nonzero = 1;
4448 #endif
4449
4450 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4451 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4452 break;
4453
4454 case ABS:
4455 #if 0
4456 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4457 and num_sign_bit_copies. */
4458 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4459 == GET_MODE_PRECISION (GET_MODE (x)))
4460 nonzero = 1;
4461 #endif
4462 break;
4463
4464 case TRUNCATE:
4465 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4466 known_x, known_mode, known_ret)
4467 & GET_MODE_MASK (mode));
4468 break;
4469
4470 case ZERO_EXTEND:
4471 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4472 known_x, known_mode, known_ret);
4473 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4474 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4475 break;
4476
4477 case SIGN_EXTEND:
4478 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4479 Otherwise, show that all the bits in the outer mode but not in the
4480 inner mode may be nonzero. */
4481 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4482 known_x, known_mode, known_ret);
4483 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4484 {
4485 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4486 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4487 inner_nz |= (GET_MODE_MASK (mode)
4488 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4489 }
4490
4491 nonzero &= inner_nz;
4492 break;
4493
4494 case AND:
4495 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4496 known_x, known_mode, known_ret)
4497 & cached_nonzero_bits (XEXP (x, 1), mode,
4498 known_x, known_mode, known_ret);
4499 break;
4500
4501 case XOR: case IOR:
4502 case UMIN: case UMAX: case SMIN: case SMAX:
4503 {
4504 unsigned HOST_WIDE_INT nonzero0
4505 = cached_nonzero_bits (XEXP (x, 0), mode,
4506 known_x, known_mode, known_ret);
4507
4508 /* Don't call nonzero_bits for the second time if it cannot change
4509 anything. */
4510 if ((nonzero & nonzero0) != nonzero)
4511 nonzero &= nonzero0
4512 | cached_nonzero_bits (XEXP (x, 1), mode,
4513 known_x, known_mode, known_ret);
4514 }
4515 break;
4516
4517 case PLUS: case MINUS:
4518 case MULT:
4519 case DIV: case UDIV:
4520 case MOD: case UMOD:
4521 /* We can apply the rules of arithmetic to compute the number of
4522 high- and low-order zero bits of these operations. We start by
4523 computing the width (position of the highest-order nonzero bit)
4524 and the number of low-order zero bits for each value. */
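/* As an illustration: if NZ0 == 0x06 (width 3, one low zero bit) and
   NZ1 == 0x0c (width 4, two low zero bits), then for MULT the result
   has width at most 7 and at least 3 low zero bits, so NONZERO is
   narrowed to 0x78. */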
4525 {
4526 unsigned HOST_WIDE_INT nz0
4527 = cached_nonzero_bits (XEXP (x, 0), mode,
4528 known_x, known_mode, known_ret);
4529 unsigned HOST_WIDE_INT nz1
4530 = cached_nonzero_bits (XEXP (x, 1), mode,
4531 known_x, known_mode, known_ret);
4532 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4533 int width0 = floor_log2 (nz0) + 1;
4534 int width1 = floor_log2 (nz1) + 1;
4535 int low0 = ctz_or_zero (nz0);
4536 int low1 = ctz_or_zero (nz1);
4537 unsigned HOST_WIDE_INT op0_maybe_minusp
4538 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4539 unsigned HOST_WIDE_INT op1_maybe_minusp
4540 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4541 unsigned int result_width = mode_width;
4542 int result_low = 0;
4543
4544 switch (code)
4545 {
4546 case PLUS:
4547 result_width = MAX (width0, width1) + 1;
4548 result_low = MIN (low0, low1);
4549 break;
4550 case MINUS:
4551 result_low = MIN (low0, low1);
4552 break;
4553 case MULT:
4554 result_width = width0 + width1;
4555 result_low = low0 + low1;
4556 break;
4557 case DIV:
4558 if (width1 == 0)
4559 break;
4560 if (!op0_maybe_minusp && !op1_maybe_minusp)
4561 result_width = width0;
4562 break;
4563 case UDIV:
4564 if (width1 == 0)
4565 break;
4566 result_width = width0;
4567 break;
4568 case MOD:
4569 if (width1 == 0)
4570 break;
4571 if (!op0_maybe_minusp && !op1_maybe_minusp)
4572 result_width = MIN (width0, width1);
4573 result_low = MIN (low0, low1);
4574 break;
4575 case UMOD:
4576 if (width1 == 0)
4577 break;
4578 result_width = MIN (width0, width1);
4579 result_low = MIN (low0, low1);
4580 break;
4581 default:
4582 gcc_unreachable ();
4583 }
4584
4585 if (result_width < mode_width)
4586 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4587
4588 if (result_low > 0)
4589 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4590 }
4591 break;
4592
4593 case ZERO_EXTRACT:
4594 if (CONST_INT_P (XEXP (x, 1))
4595 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4596 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4597 break;
4598
4599 case SUBREG:
4600 /* If this is a SUBREG formed for a promoted variable that has
4601 been zero-extended, we know that at least the high-order bits
4602 are zero, though others might be too. */
4603 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4604 nonzero = GET_MODE_MASK (GET_MODE (x))
4605 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4606 known_x, known_mode, known_ret);
4607
4608 /* If the inner mode is a single word for both the host and target
4609 machines, we can compute this from which bits of the inner
4610 object might be nonzero. */
4611 inner_mode = GET_MODE (SUBREG_REG (x));
4612 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4613 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
4614 {
4615 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4616 known_x, known_mode, known_ret);
4617
4618 /* On many CISC machines, accessing an object in a wider mode
4619 causes the high-order bits to become undefined. So they are
4620 not known to be zero. */
4621 rtx_code extend_op;
4622 if ((!WORD_REGISTER_OPERATIONS
4623 /* If this is a typical RISC machine, we only have to worry
4624 about the way loads are extended. */
4625 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4626 ? val_signbit_known_set_p (inner_mode, nonzero)
4627 : extend_op != ZERO_EXTEND)
4628 || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4629 && GET_MODE_PRECISION (GET_MODE (x))
4630 > GET_MODE_PRECISION (inner_mode))
4631 nonzero
4632 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4633 }
4634 break;
4635
4636 case ASHIFTRT:
4637 case LSHIFTRT:
4638 case ASHIFT:
4639 case ROTATE:
4640 /* The nonzero bits are in two classes: any bits within MODE
4641 that aren't in GET_MODE (x) are always significant. The rest of the
4642 nonzero bits are those that are significant in the operand of
4643 the shift when shifted the appropriate number of bits. This
4644 shows that high-order bits are cleared by the right shift and
4645 low-order bits by left shifts. */
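/* For example, for (lshiftrt:SI X (const_int 8)) with OP_NONZERO == 0xff00,
   INNER becomes 0x00ff, so only the low byte of the result can be
   nonzero. */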
4646 if (CONST_INT_P (XEXP (x, 1))
4647 && INTVAL (XEXP (x, 1)) >= 0
4648 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4649 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4650 {
4651 machine_mode inner_mode = GET_MODE (x);
4652 unsigned int width = GET_MODE_PRECISION (inner_mode);
4653 int count = INTVAL (XEXP (x, 1));
4654 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4655 unsigned HOST_WIDE_INT op_nonzero
4656 = cached_nonzero_bits (XEXP (x, 0), mode,
4657 known_x, known_mode, known_ret);
4658 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4659 unsigned HOST_WIDE_INT outer = 0;
4660
4661 if (mode_width > width)
4662 outer = (op_nonzero & nonzero & ~mode_mask);
4663
4664 if (code == LSHIFTRT)
4665 inner >>= count;
4666 else if (code == ASHIFTRT)
4667 {
4668 inner >>= count;
4669
4670 /* If the sign bit may have been nonzero before the shift, we
4671 need to mark all the places it could have been copied to
4672 by the shift as possibly nonzero. */
4673 if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
4674 inner |= ((HOST_WIDE_INT_1U << count) - 1)
4675 << (width - count);
4676 }
4677 else if (code == ASHIFT)
4678 inner <<= count;
4679 else
4680 inner = ((inner << (count % width)
4681 | (inner >> (width - (count % width)))) & mode_mask);
4682
4683 nonzero &= (outer | inner);
4684 }
4685 break;
4686
4687 case FFS:
4688 case POPCOUNT:
4689 /* This is at most the number of bits in the mode. */
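/* E.g. for a 32-bit mode the count is at most 32, so only the low six
   bits (mask 0x3f) can be nonzero. */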
4690 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4691 break;
4692
4693 case CLZ:
4694 /* If CLZ has a known value at zero, then the nonzero bits are
4695 that value, plus the number of bits in the mode minus one. */
4696 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4697 nonzero
4698 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4699 else
4700 nonzero = -1;
4701 break;
4702
4703 case CTZ:
4704 /* If CTZ has a known value at zero, then the nonzero bits are
4705 that value, plus the number of bits in the mode minus one. */
4706 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4707 nonzero
4708 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4709 else
4710 nonzero = -1;
4711 break;
4712
4713 case CLRSB:
4714 /* This is at most the number of bits in the mode minus 1. */
4715 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4716 break;
4717
4718 case PARITY:
4719 nonzero = 1;
4720 break;
4721
4722 case IF_THEN_ELSE:
4723 {
4724 unsigned HOST_WIDE_INT nonzero_true
4725 = cached_nonzero_bits (XEXP (x, 1), mode,
4726 known_x, known_mode, known_ret);
4727
4728 /* Don't call nonzero_bits for the second time if it cannot change
4729 anything. */
4730 if ((nonzero & nonzero_true) != nonzero)
4731 nonzero &= nonzero_true
4732 | cached_nonzero_bits (XEXP (x, 2), mode,
4733 known_x, known_mode, known_ret);
4734 }
4735 break;
4736
4737 default:
4738 break;
4739 }
4740
4741 return nonzero;
4742 }
4743
4744 /* See the macro definition above. */
4745 #undef cached_num_sign_bit_copies
4746
4747 \f
4748 /* Return true if num_sign_bit_copies1 might recurse into both operands
4749 of X. */
4750
4751 static inline bool
4752 num_sign_bit_copies_binary_arith_p (const_rtx x)
4753 {
4754 if (!ARITHMETIC_P (x))
4755 return false;
4756 switch (GET_CODE (x))
4757 {
4758 case IOR:
4759 case AND:
4760 case XOR:
4761 case SMIN:
4762 case SMAX:
4763 case UMIN:
4764 case UMAX:
4765 case PLUS:
4766 case MINUS:
4767 case MULT:
4768 return true;
4769 default:
4770 return false;
4771 }
4772 }
4773
4774 /* The function cached_num_sign_bit_copies is a wrapper around
4775 num_sign_bit_copies1. It avoids exponential behavior in
4776 num_sign_bit_copies1 when X has identical subexpressions on the
4777 first or the second level. */
4778
4779 static unsigned int
4780 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4781 machine_mode known_mode,
4782 unsigned int known_ret)
4783 {
4784 if (x == known_x && mode == known_mode)
4785 return known_ret;
4786
4787 /* Try to find identical subexpressions. If found call
4788 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4789 the precomputed value for the subexpression as KNOWN_RET. */
4790
4791 if (num_sign_bit_copies_binary_arith_p (x))
4792 {
4793 rtx x0 = XEXP (x, 0);
4794 rtx x1 = XEXP (x, 1);
4795
4796 /* Check the first level. */
4797 if (x0 == x1)
4798 return
4799 num_sign_bit_copies1 (x, mode, x0, mode,
4800 cached_num_sign_bit_copies (x0, mode, known_x,
4801 known_mode,
4802 known_ret));
4803
4804 /* Check the second level. */
4805 if (num_sign_bit_copies_binary_arith_p (x0)
4806 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4807 return
4808 num_sign_bit_copies1 (x, mode, x1, mode,
4809 cached_num_sign_bit_copies (x1, mode, known_x,
4810 known_mode,
4811 known_ret));
4812
4813 if (num_sign_bit_copies_binary_arith_p (x1)
4814 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4815 return
4816 num_sign_bit_copies1 (x, mode, x0, mode,
4817 cached_num_sign_bit_copies (x0, mode, known_x,
4818 known_mode,
4819 known_ret));
4820 }
4821
4822 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4823 }
4824
4825 /* Return the number of bits at the high-order end of X that are known to
4826 be equal to the sign bit. X will be used in mode MODE; if MODE is
4827 VOIDmode, X will be used in its own mode. The returned value will always
4828 be between 1 and the number of bits in MODE. */
4829
4830 static unsigned int
4831 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4832 machine_mode known_mode,
4833 unsigned int known_ret)
4834 {
4835 enum rtx_code code = GET_CODE (x);
4836 machine_mode inner_mode;
4837 int num0, num1, result;
4838 unsigned HOST_WIDE_INT nonzero;
4839
4840 /* If we weren't given a mode, use the mode of X. If the mode is still
4841 VOIDmode, we don't know anything. Likewise if one of the modes is
4842 floating-point. */
4843
4844 if (mode == VOIDmode)
4845 mode = GET_MODE (x);
4846
4847 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4848 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4849 return 1;
4850
4851 /* For a smaller mode, just ignore the high bits. */
4852 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4853 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4854 {
4855 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4856 known_x, known_mode, known_ret);
4857 return MAX (1,
4858 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4859 }
4860
4861 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4862 {
4863 /* If this machine does not do all register operations on the entire
4864 register and MODE is wider than the mode of X, we can say nothing
4865 at all about the high-order bits. */
4866 if (!WORD_REGISTER_OPERATIONS)
4867 return 1;
4868
4869 /* Likewise on machines that do, if the mode of the object is smaller
4870 than a word and loads of that size don't sign extend, we can say
4871 nothing about the high order bits. */
4872 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4873 && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
4874 return 1;
4875 }
4876
4877 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
4878 the code in the switch below. */
4879 switch (code)
4880 {
4881 case REG:
4882
4883 #if defined(POINTERS_EXTEND_UNSIGNED)
4884 /* If pointers extend signed and this is a pointer in Pmode, say that
4885 all the bits above ptr_mode are known to be sign bit copies. */
4886 /* As we do not know which address space the pointer is referring to,
4887 we can do this only if the target does not support different pointer
4888 or address modes depending on the address space. */
4889 if (target_default_pointer_address_modes_p ()
4890 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4891 && mode == Pmode && REG_POINTER (x)
4892 && !targetm.have_ptr_extend ())
4893 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4894 #endif
4895
4896 {
4897 unsigned int copies_for_hook = 1, copies = 1;
4898 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4899 known_mode, known_ret,
4900 &copies_for_hook);
4901
4902 if (new_rtx)
4903 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4904 known_mode, known_ret);
4905
4906 if (copies > 1 || copies_for_hook > 1)
4907 return MAX (copies, copies_for_hook);
4908
4909 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4910 }
4911 break;
4912
4913 case MEM:
4914 /* Some RISC machines sign-extend all loads of smaller than a word. */
4915 if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
4916 return MAX (1, ((int) bitwidth
4917 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4918 break;
4919
4920 case CONST_INT:
4921 /* If the constant is negative, take its 1's complement and remask.
4922 Then see how many zero bits we have. */
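/* For example, the SImode constant -4 leaves NONZERO == 3 after
   complementing and remasking, so it is reported as having 30 sign-bit
   copies (bits 31 down to 2 all equal the sign bit). */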
4923 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4924 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4925 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
4926 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4927
4928 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4929
4930 case SUBREG:
4931 /* If this is a SUBREG for a promoted object that is sign-extended
4932 and we are looking at it in a wider mode, we know that at least the
4933 high-order bits are sign bit copies. */
4934
4935 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4936 {
4937 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4938 known_x, known_mode, known_ret);
4939 return MAX ((int) bitwidth
4940 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4941 num0);
4942 }
4943
4944 /* For a smaller object, just ignore the high bits. */
4945 inner_mode = GET_MODE (SUBREG_REG (x));
4946 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
4947 {
4948 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4949 known_x, known_mode, known_ret);
4950 return
4951 MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
4952 }
4953
4954 /* For paradoxical SUBREGs on machines where all register operations
4955 affect the entire register, just look inside. Note that we are
4956 passing MODE to the recursive call, so the number of sign bit copies
4957 will remain relative to that mode, not the inner mode. */
4958
4959 /* This works only if loads sign extend. Otherwise, if we get a
4960 reload for the inner part, it may be loaded from the stack, and
4961 then we lose all sign bit copies that existed before the store
4962 to the stack. */
4963
4964 if (WORD_REGISTER_OPERATIONS
4965 && load_extend_op (inner_mode) == SIGN_EXTEND
4966 && paradoxical_subreg_p (x)
4967 && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
4968 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4969 known_x, known_mode, known_ret);
4970 break;
4971
4972 case SIGN_EXTRACT:
4973 if (CONST_INT_P (XEXP (x, 1)))
4974 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4975 break;
4976
4977 case SIGN_EXTEND:
4978 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4979 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4980 known_x, known_mode, known_ret));
4981
4982 case TRUNCATE:
4983 /* For a smaller object, just ignore the high bits. */
4984 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4985 known_x, known_mode, known_ret);
4986 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4987 - bitwidth)));
4988
4989 case NOT:
4990 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4991 known_x, known_mode, known_ret);
4992
4993 case ROTATE: case ROTATERT:
4994 /* If we are rotating left by a number of bits less than the number
4995 of sign bit copies, we can just subtract that amount from the
4996 number. */
4997 if (CONST_INT_P (XEXP (x, 1))
4998 && INTVAL (XEXP (x, 1)) >= 0
4999 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5000 {
5001 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5002 known_x, known_mode, known_ret);
5003 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5004 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5005 }
5006 break;
5007
5008 case NEG:
5009 /* In general, this subtracts one sign bit copy. But if the value
5010 is known to be positive, the number of sign bit copies is the
5011 same as that of the input. Finally, if the input has just one bit
5012 that might be nonzero, all the bits are copies of the sign bit. */
5013 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5014 known_x, known_mode, known_ret);
5015 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5016 return num0 > 1 ? num0 - 1 : 1;
5017
5018 nonzero = nonzero_bits (XEXP (x, 0), mode);
5019 if (nonzero == 1)
5020 return bitwidth;
5021
5022 if (num0 > 1
5023 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5024 num0--;
5025
5026 return num0;
5027
5028 case IOR: case AND: case XOR:
5029 case SMIN: case SMAX: case UMIN: case UMAX:
5030 /* Logical operations will preserve the number of sign-bit copies.
5031 MIN and MAX operations always return one of the operands. */
5032 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5033 known_x, known_mode, known_ret);
5034 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5035 known_x, known_mode, known_ret);
5036
5037 /* If num1 is clearing some of the top bits then regardless of
5038 the other term, we are guaranteed to have at least that many
5039 high-order zero bits. */
5040 if (code == AND
5041 && num1 > 1
5042 && bitwidth <= HOST_BITS_PER_WIDE_INT
5043 && CONST_INT_P (XEXP (x, 1))
5044 && (UINTVAL (XEXP (x, 1))
5045 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5046 return num1;
5047
5048 /* Similarly for IOR when setting high-order bits. */
5049 if (code == IOR
5050 && num1 > 1
5051 && bitwidth <= HOST_BITS_PER_WIDE_INT
5052 && CONST_INT_P (XEXP (x, 1))
5053 && (UINTVAL (XEXP (x, 1))
5054 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5055 return num1;
5056
5057 return MIN (num0, num1);
5058
5059 case PLUS: case MINUS:
5060 /* For addition and subtraction, we can have a 1-bit carry. However,
5061 if we are subtracting 1 from a positive number, there will not
5062 be such a carry. Furthermore, if the positive number is known to
5063 be 0 or 1, we know the result is either -1 or 0. */
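/* For example, adding two SImode values that each have at least 10
   sign-bit copies yields a result with at least 9, since the possible
   carry can consume only one copy. */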
5064
5065 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5066 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5067 {
5068 nonzero = nonzero_bits (XEXP (x, 0), mode);
5069 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5070 return (nonzero == 1 || nonzero == 0 ? bitwidth
5071 : bitwidth - floor_log2 (nonzero) - 1);
5072 }
5073
5074 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5075 known_x, known_mode, known_ret);
5076 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5077 known_x, known_mode, known_ret);
5078 result = MAX (1, MIN (num0, num1) - 1);
5079
5080 return result;
5081
5082 case MULT:
5083 /* The number of bits of the product is the sum of the number of
5084 bits of both terms. However, unless one of the terms is known
5085 to be positive, we must allow for an additional bit since negating
5086 a negative number can remove one sign bit copy. */
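/* For example, in SImode two operands with 20 sign-bit copies each use at
   most 13 significant bits apiece, so NUM0 + NUM1 - BITWIDTH gives 8
   copies, reduced to 7 when both operands might be negative. */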
5087
5088 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5089 known_x, known_mode, known_ret);
5090 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5091 known_x, known_mode, known_ret);
5092
5093 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5094 if (result > 0
5095 && (bitwidth > HOST_BITS_PER_WIDE_INT
5096 || (((nonzero_bits (XEXP (x, 0), mode)
5097 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5098 && ((nonzero_bits (XEXP (x, 1), mode)
5099 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5100 != 0))))
5101 result--;
5102
5103 return MAX (1, result);
5104
5105 case UDIV:
5106 /* The result must be <= the first operand. If the first operand
5107 has the high bit set, we know nothing about the number of sign
5108 bit copies. */
5109 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5110 return 1;
5111 else if ((nonzero_bits (XEXP (x, 0), mode)
5112 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5113 return 1;
5114 else
5115 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5116 known_x, known_mode, known_ret);
5117
5118 case UMOD:
5119 /* The result must be <= the second operand. If the second operand
5120 has (or just might have) the high bit set, we know nothing about
5121 the number of sign bit copies. */
5122 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5123 return 1;
5124 else if ((nonzero_bits (XEXP (x, 1), mode)
5125 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5126 return 1;
5127 else
5128 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5129 known_x, known_mode, known_ret);
5130
5131 case DIV:
5132 /* Similar to unsigned division, except that we have to worry about
5133 the case where the divisor is negative, in which case we have
5134 to add 1. */
5135 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5136 known_x, known_mode, known_ret);
5137 if (result > 1
5138 && (bitwidth > HOST_BITS_PER_WIDE_INT
5139 || (nonzero_bits (XEXP (x, 1), mode)
5140 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5141 result--;
5142
5143 return result;
5144
5145 case MOD:
5146 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5147 known_x, known_mode, known_ret);
5148 if (result > 1
5149 && (bitwidth > HOST_BITS_PER_WIDE_INT
5150 || (nonzero_bits (XEXP (x, 1), mode)
5151 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5152 result--;
5153
5154 return result;
5155
5156 case ASHIFTRT:
5157 /* An arithmetic right shift by a constant adds the shift count to the
5158 number of bits that are equal to the sign bit. */
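/* For example, (ashiftrt:SI X (const_int 4)) where X has 3 sign-bit
   copies has at least 7, capped at the bit width. */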
5159 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5160 known_x, known_mode, known_ret);
5161 if (CONST_INT_P (XEXP (x, 1))
5162 && INTVAL (XEXP (x, 1)) > 0
5163 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5164 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5165
5166 return num0;
5167
5168 case ASHIFT:
5169 /* Left shifts destroy copies. */
5170 if (!CONST_INT_P (XEXP (x, 1))
5171 || INTVAL (XEXP (x, 1)) < 0
5172 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5173 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5174 return 1;
5175
5176 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5177 known_x, known_mode, known_ret);
5178 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5179
5180 case IF_THEN_ELSE:
5181 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5182 known_x, known_mode, known_ret);
5183 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5184 known_x, known_mode, known_ret);
5185 return MIN (num0, num1);
5186
5187 case EQ: case NE: case GE: case GT: case LE: case LT:
5188 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5189 case GEU: case GTU: case LEU: case LTU:
5190 case UNORDERED: case ORDERED:
5191 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
5192 Then see how many zero bits we have. */
5193 nonzero = STORE_FLAG_VALUE;
5194 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5195 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5196 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5197
5198 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5199
5200 default:
5201 break;
5202 }
5203
5204 /* If we haven't been able to figure it out by one of the above rules,
5205 see if some of the high-order bits are known to be zero. If so,
5206 count those bits and return one less than that amount. If we can't
5207 safely compute the mask for this mode, always return BITWIDTH. */
5208
5209 bitwidth = GET_MODE_PRECISION (mode);
5210 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5211 return 1;
5212
5213 nonzero = nonzero_bits (x, mode);
5214 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5215 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5216 }
5217
5218 /* Calculate the rtx_cost of a single instruction. A return value of
5219 zero indicates an instruction pattern without a known cost. */
5220
5221 int
5222 insn_rtx_cost (rtx pat, bool speed)
5223 {
5224 int i, cost;
5225 rtx set;
5226
5227 /* Extract the single set rtx from the instruction pattern.
5228 We can't use single_set since we only have the pattern. */
5229 if (GET_CODE (pat) == SET)
5230 set = pat;
5231 else if (GET_CODE (pat) == PARALLEL)
5232 {
5233 set = NULL_RTX;
5234 for (i = 0; i < XVECLEN (pat, 0); i++)
5235 {
5236 rtx x = XVECEXP (pat, 0, i);
5237 if (GET_CODE (x) == SET)
5238 {
5239 if (set)
5240 return 0;
5241 set = x;
5242 }
5243 }
5244 if (!set)
5245 return 0;
5246 }
5247 else
5248 return 0;
5249
5250 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5251 return cost > 0 ? cost : COSTS_N_INSNS (1);
5252 }
5253
5254 /* Return an estimate of the cost of computing SEQ. */
5255
5256 unsigned
5257 seq_cost (const rtx_insn *seq, bool speed)
5258 {
5259 unsigned cost = 0;
5260 rtx set;
5261
5262 for (; seq; seq = NEXT_INSN (seq))
5263 {
5264 set = single_set (seq);
5265 if (set)
5266 cost += set_rtx_cost (set, speed);
5267 else
5268 cost++;
5269 }
5270
5271 return cost;
5272 }
5273
5274 /* Given an insn INSN and condition COND, return the condition in a
5275 canonical form to simplify testing by callers. Specifically:
5276
5277 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5278 (2) Both operands will be machine operands; (cc0) will have been replaced.
5279 (3) If an operand is a constant, it will be the second operand.
5280 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5281 for GE, GEU, and LEU.
5282
5283 If the condition cannot be understood, or is an inequality floating-point
5284 comparison which needs to be reversed, 0 will be returned.
5285
5286 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5287
5288 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5289 insn used in locating the condition was found. If a replacement test
5290 of the condition is desired, it should be placed in front of that
5291 insn and we will be sure that the inputs are still valid.
5292
5293 If WANT_REG is nonzero, we wish the condition to be relative to that
5294 register, if possible. Therefore, do not canonicalize the condition
5295 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5296 to be a compare to a CC mode register.
5297
5298 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5299 and at INSN. */
5300
5301 rtx
5302 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5303 rtx_insn **earliest,
5304 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5305 {
5306 enum rtx_code code;
5307 rtx_insn *prev = insn;
5308 const_rtx set;
5309 rtx tem;
5310 rtx op0, op1;
5311 int reverse_code = 0;
5312 machine_mode mode;
5313 basic_block bb = BLOCK_FOR_INSN (insn);
5314
5315 code = GET_CODE (cond);
5316 mode = GET_MODE (cond);
5317 op0 = XEXP (cond, 0);
5318 op1 = XEXP (cond, 1);
5319
5320 if (reverse)
5321 code = reversed_comparison_code (cond, insn);
5322 if (code == UNKNOWN)
5323 return 0;
5324
5325 if (earliest)
5326 *earliest = insn;
5327
5328 /* If we are comparing a register with zero, see if the register is set
5329 in the previous insn to a COMPARE or a comparison operation. Perform
5330 the same tests, as a function of STORE_FLAG_VALUE, that
5331 find_comparison_args does in cse.c. */
5332
5333 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5334 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5335 && op1 == CONST0_RTX (GET_MODE (op0))
5336 && op0 != want_reg)
5337 {
5338 /* Set nonzero when we find something of interest. */
5339 rtx x = 0;
5340
5341 /* If comparison with cc0, import actual comparison from compare
5342 insn. */
5343 if (op0 == cc0_rtx)
5344 {
5345 if ((prev = prev_nonnote_insn (prev)) == 0
5346 || !NONJUMP_INSN_P (prev)
5347 || (set = single_set (prev)) == 0
5348 || SET_DEST (set) != cc0_rtx)
5349 return 0;
5350
5351 op0 = SET_SRC (set);
5352 op1 = CONST0_RTX (GET_MODE (op0));
5353 if (earliest)
5354 *earliest = prev;
5355 }
5356
5357 /* If this is a COMPARE, pick up the two things being compared. */
5358 if (GET_CODE (op0) == COMPARE)
5359 {
5360 op1 = XEXP (op0, 1);
5361 op0 = XEXP (op0, 0);
5362 continue;
5363 }
5364 else if (!REG_P (op0))
5365 break;
5366
5367 /* Go back to the previous insn. Stop if it is not an INSN. We also
5368 stop if it isn't a single set or if it has a REG_INC note because
5369 we don't want to bother dealing with it. */
5370
5371 prev = prev_nonnote_nondebug_insn (prev);
5372
5373 if (prev == 0
5374 || !NONJUMP_INSN_P (prev)
5375 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5376 /* In cfglayout mode, there do not have to be labels at the
5377 beginning of a block, or jumps at the end, so the previous
5378 conditions would not stop us when we reach the bb boundary. */
5379 || BLOCK_FOR_INSN (prev) != bb)
5380 break;
5381
5382 set = set_of (op0, prev);
5383
5384 if (set
5385 && (GET_CODE (set) != SET
5386 || !rtx_equal_p (SET_DEST (set), op0)))
5387 break;
5388
5389 /* If this is setting OP0, get what it sets it to if it looks
5390 relevant. */
5391 if (set)
5392 {
5393 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5394 #ifdef FLOAT_STORE_FLAG_VALUE
5395 REAL_VALUE_TYPE fsfv;
5396 #endif
5397
5398 /* ??? We may not combine comparisons done in a CCmode with
5399 comparisons not done in a CCmode. This is to aid targets
5400 like Alpha that have an IEEE compliant EQ instruction, and
5401 a non-IEEE compliant BEQ instruction. The use of CCmode is
5402 actually artificial, simply to prevent the combination, but
5403 should not affect other platforms.
5404
5405 However, we must allow VOIDmode comparisons to match either
5406 CCmode or non-CCmode comparison, because some ports have
5407 modeless comparisons inside branch patterns.
5408
5409 ??? This mode check should perhaps look more like the mode check
5410 in simplify_comparison in combine. */
5411 if (((GET_MODE_CLASS (mode) == MODE_CC)
5412 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5413 && mode != VOIDmode
5414 && inner_mode != VOIDmode)
5415 break;
5416 if (GET_CODE (SET_SRC (set)) == COMPARE
5417 || (((code == NE
5418 || (code == LT
5419 && val_signbit_known_set_p (inner_mode,
5420 STORE_FLAG_VALUE))
5421 #ifdef FLOAT_STORE_FLAG_VALUE
5422 || (code == LT
5423 && SCALAR_FLOAT_MODE_P (inner_mode)
5424 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5425 REAL_VALUE_NEGATIVE (fsfv)))
5426 #endif
5427 ))
5428 && COMPARISON_P (SET_SRC (set))))
5429 x = SET_SRC (set);
5430 else if (((code == EQ
5431 || (code == GE
5432 && val_signbit_known_set_p (inner_mode,
5433 STORE_FLAG_VALUE))
5434 #ifdef FLOAT_STORE_FLAG_VALUE
5435 || (code == GE
5436 && SCALAR_FLOAT_MODE_P (inner_mode)
5437 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5438 REAL_VALUE_NEGATIVE (fsfv)))
5439 #endif
5440 ))
5441 && COMPARISON_P (SET_SRC (set)))
5442 {
5443 reverse_code = 1;
5444 x = SET_SRC (set);
5445 }
5446 else if ((code == EQ || code == NE)
5447 && GET_CODE (SET_SRC (set)) == XOR)
5448 /* Handle sequences like:
5449
5450 (set op0 (xor X Y))
5451 ...(eq|ne op0 (const_int 0))...
5452
5453 in which case:
5454
5455 (eq op0 (const_int 0)) reduces to (eq X Y)
5456 (ne op0 (const_int 0)) reduces to (ne X Y)
5457
5458 This is the form used by MIPS16, for example. */
5459 x = SET_SRC (set);
5460 else
5461 break;
5462 }
5463
5464 else if (reg_set_p (op0, prev))
5465 /* If this sets OP0, but not directly, we have to give up. */
5466 break;
5467
5468 if (x)
5469 {
5470 /* If the caller is expecting the condition to be valid at INSN,
5471 make sure X doesn't change before INSN. */
5472 if (valid_at_insn_p)
5473 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5474 break;
5475 if (COMPARISON_P (x))
5476 code = GET_CODE (x);
5477 if (reverse_code)
5478 {
5479 code = reversed_comparison_code (x, prev);
5480 if (code == UNKNOWN)
5481 return 0;
5482 reverse_code = 0;
5483 }
5484
5485 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5486 if (earliest)
5487 *earliest = prev;
5488 }
5489 }
5490
5491 /* If constant is first, put it last. */
5492 if (CONSTANT_P (op0))
5493 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5494
5495 /* If OP0 is the result of a comparison, we weren't able to find what
5496 was really being compared, so fail. */
5497 if (!allow_cc_mode
5498 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5499 return 0;
5500
5501 /* Canonicalize any ordered comparison with integers involving equality
5502 if we can do computations in the relevant mode and we do not
5503 overflow. */
5504
5505 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5506 && CONST_INT_P (op1)
5507 && GET_MODE (op0) != VOIDmode
5508 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5509 {
5510 HOST_WIDE_INT const_val = INTVAL (op1);
5511 unsigned HOST_WIDE_INT uconst_val = const_val;
5512 unsigned HOST_WIDE_INT max_val
5513 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5514
5515 switch (code)
5516 {
5517 case LE:
5518 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5519 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5520 break;
5521
5522 /* When cross-compiling, const_val might be sign-extended from
5523 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5524 case GE:
5525 if ((const_val & max_val)
5526 != (HOST_WIDE_INT_1U
5527 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5528 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5529 break;
5530
5531 case LEU:
5532 if (uconst_val < max_val)
5533 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5534 break;
5535
5536 case GEU:
5537 if (uconst_val != 0)
5538 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5539 break;
5540
5541 default:
5542 break;
5543 }
5544 }
5545
5546 /* Never return CC0; return zero instead. */
5547 if (CC0_P (op0))
5548 return 0;
5549
5550 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5551 }
5552
5553 /* Given a jump insn JUMP, return the condition that will cause it to branch
5554 to its JUMP_LABEL. If the condition cannot be understood, or is an
5555 inequality floating-point comparison which needs to be reversed, 0 will
5556 be returned.
5557
5558 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5559 insn used in locating the condition was found. If a replacement test
5560 of the condition is desired, it should be placed in front of that
5561 insn and we will be sure that the inputs are still valid. If EARLIEST
5562 is null, the returned condition will be valid at INSN.
5563
5564 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5565 compare CC mode register.
5566
5567 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5568
5569 rtx
5570 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5571 int valid_at_insn_p)
5572 {
5573 rtx cond;
5574 int reverse;
5575 rtx set;
5576
5577 /* If this is not a standard conditional jump, we can't parse it. */
5578 if (!JUMP_P (jump)
5579 || ! any_condjump_p (jump))
5580 return 0;
5581 set = pc_set (jump);
5582
5583 cond = XEXP (SET_SRC (set), 0);
5584
5585 /* If this branches to JUMP_LABEL when the condition is false, reverse
5586 the condition. */
5587 reverse
5588 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5589 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5590
5591 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5592 allow_cc_mode, valid_at_insn_p);
5593 }
5594
5595 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5596 TARGET_MODE_REP_EXTENDED.
5597
5598 Note that we assume that the property of
5599 TARGET_MODE_REP_EXTENDED (B, C) also holds for all integral modes
5600 narrower than mode B. I.e., if A is a mode narrower than B, then in
5601 order to be able to operate on it in mode B, mode A needs to
5602 satisfy the requirements set by the representation of mode B. */
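/* For example, on a target such as MIPS64, where SImode values are kept
   sign-extended when held in DImode registers, the table entry
   num_sign_bit_copies_in_rep[DImode][SImode] ends up as 32. */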
5603
5604 static void
5605 init_num_sign_bit_copies_in_rep (void)
5606 {
5607 machine_mode mode, in_mode;
5608
5609 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5610 in_mode = GET_MODE_WIDER_MODE (mode))
5611 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5612 mode = GET_MODE_WIDER_MODE (mode))
5613 {
5614 machine_mode i;
5615
5616 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5617 extends to the next widest mode. */
5618 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5619 || GET_MODE_WIDER_MODE (mode) == in_mode);
5620
5621 /* We are in in_mode. Count how many bits outside of mode
5622 have to be copies of the sign-bit. */
5623 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5624 {
5625 machine_mode wider = GET_MODE_WIDER_MODE (i);
5626
5627 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5628 /* We can only check sign-bit copies starting from the
5629 top-bit. In order to be able to check the bits we
5630 have already seen we pretend that subsequent bits
5631 have to be sign-bit copies too. */
5632 || num_sign_bit_copies_in_rep [in_mode][mode])
5633 num_sign_bit_copies_in_rep [in_mode][mode]
5634 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5635 }
5636 }
5637 }
5638
5639 /* Suppose that truncation from the machine mode of X to MODE is not a
5640 no-op. See if there is anything special about X so that we can
5641 assume it already contains a truncated value of MODE. */
5642
5643 bool
5644 truncated_to_mode (machine_mode mode, const_rtx x)
5645 {
5646 /* This register has already been used in MODE without explicit
5647 truncation. */
5648 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5649 return true;
5650
5651 /* See if we already satisfy the requirements of MODE. If yes we
5652 can just switch to MODE. */
5653 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5654 && (num_sign_bit_copies (x, GET_MODE (x))
5655 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5656 return true;
5657
5658 return false;
5659 }
5660 \f
5661 /* Return true if RTX code CODE has a single sequence of zero or more
5662 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5663 entry in that case. */
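/* For example, PLUS has format "ee", giving a start of 0 and a count of 2,
   and IF_THEN_ELSE ("eee") gets a count of 3; codes with 'E' or 'V'
   operands are rejected here and later get a count of UCHAR_MAX. */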
5664
5665 static bool
5666 setup_reg_subrtx_bounds (unsigned int code)
5667 {
5668 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5669 unsigned int i = 0;
5670 for (; format[i] != 'e'; ++i)
5671 {
5672 if (!format[i])
5673 /* No subrtxes. Leave start and count as 0. */
5674 return true;
5675 if (format[i] == 'E' || format[i] == 'V')
5676 return false;
5677 }
5678
5679 /* Record the sequence of 'e's. */
5680 rtx_all_subrtx_bounds[code].start = i;
5681 do
5682 ++i;
5683 while (format[i] == 'e');
5684 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5685 /* rtl-iter.h relies on this. */
5686 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5687
5688 for (; format[i]; ++i)
5689 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5690 return false;
5691
5692 return true;
5693 }
5694
5695 /* Initialize rtx_all_subrtx_bounds. */
5696 void
5697 init_rtlanal (void)
5698 {
5699 int i;
5700 for (i = 0; i < NUM_RTX_CODE; i++)
5701 {
5702 if (!setup_reg_subrtx_bounds (i))
5703 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5704 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5705 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5706 }
5707
5708 init_num_sign_bit_copies_in_rep ();
5709 }
5710 \f
5711 /* Check whether this is a constant pool constant. */
5712 bool
5713 constant_pool_constant_p (rtx x)
5714 {
5715 x = avoid_constant_pool_reference (x);
5716 return CONST_DOUBLE_P (x);
5717 }
5718 \f
5719 /* If M is a bitmask that selects a field of low-order bits within an item but
5720 not the entire word, return the length of the field. Return -1 otherwise.
5721 M is used in machine mode MODE. */
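/* For example, 0x3f selects the low six bits and yields 6, while 0x70 is
   not a low-order mask and yields -1. */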
5722
5723 int
5724 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5725 {
5726 if (mode != VOIDmode)
5727 {
5728 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5729 return -1;
5730 m &= GET_MODE_MASK (mode);
5731 }
5732
5733 return exact_log2 (m + 1);
5734 }
5735
5736 /* Return the mode of MEM's address. */
5737
5738 machine_mode
5739 get_address_mode (rtx mem)
5740 {
5741 machine_mode mode;
5742
5743 gcc_assert (MEM_P (mem));
5744 mode = GET_MODE (XEXP (mem, 0));
5745 if (mode != VOIDmode)
5746 return mode;
5747 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5748 }
5749 \f
5750 /* Split up a CONST_DOUBLE or integer constant rtx
5751 into two rtx's for single words,
5752 storing in *FIRST the word that comes first in memory in the target
5753 and in *SECOND the other.
5754
5755 TODO: This function needs to be rewritten to work on any size
5756 integer. */
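/* For example, with 32-bit words and a 64-bit HOST_WIDE_INT, the CONST_INT
   0x123456789abcdef0 is split into a low word of 0x9abcdef0 and a high
   word of 0x12345678, stored into *FIRST and *SECOND according to
   WORDS_BIG_ENDIAN. */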
5757
5758 void
5759 split_double (rtx value, rtx *first, rtx *second)
5760 {
5761 if (CONST_INT_P (value))
5762 {
5763 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5764 {
5765 /* In this case the CONST_INT holds both target words.
5766 Extract the bits from it into two word-sized pieces.
5767 Sign extend each half to HOST_WIDE_INT. */
5768 unsigned HOST_WIDE_INT low, high;
5769 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5770 unsigned bits_per_word = BITS_PER_WORD;
5771
5772 /* Set sign_bit to the most significant bit of a word. */
5773 sign_bit = 1;
5774 sign_bit <<= bits_per_word - 1;
5775
5776 /* Set mask so that all bits of the word are set. We could
5777 have used 1 << BITS_PER_WORD instead of basing the
5778 calculation on sign_bit. However, on machines where
5779 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5780 compiler warning, even though the code would never be
5781 executed. */
5782 mask = sign_bit << 1;
5783 mask--;
5784
5785 /* Set sign_extend to the remaining (higher-order) bits. */
5786 sign_extend = ~mask;
5787
5788 /* Pick the lower word and sign-extend it. */
5789 low = INTVAL (value);
5790 low &= mask;
5791 if (low & sign_bit)
5792 low |= sign_extend;
5793
5794 /* Pick the higher word, shifted to the least significant
5795 bits, and sign-extend it. */
5796 high = INTVAL (value);
5797 high >>= bits_per_word - 1;
5798 high >>= 1;
5799 high &= mask;
5800 if (high & sign_bit)
5801 high |= sign_extend;
5802
5803 /* Store the words in the target machine order. */
5804 if (WORDS_BIG_ENDIAN)
5805 {
5806 *first = GEN_INT (high);
5807 *second = GEN_INT (low);
5808 }
5809 else
5810 {
5811 *first = GEN_INT (low);
5812 *second = GEN_INT (high);
5813 }
5814 }
5815 else
5816 {
5817 /* The rule for using CONST_INT for a wider mode
5818 is that we regard the value as signed.
5819 So sign-extend it. */
5820 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5821 if (WORDS_BIG_ENDIAN)
5822 {
5823 *first = high;
5824 *second = value;
5825 }
5826 else
5827 {
5828 *first = value;
5829 *second = high;
5830 }
5831 }
5832 }
5833 else if (GET_CODE (value) == CONST_WIDE_INT)
5834 {
5835 /* All of this is scary code and needs to be converted to
5836 work properly with integers of any size. */
5837 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5838 if (WORDS_BIG_ENDIAN)
5839 {
5840 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5841 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5842 }
5843 else
5844 {
5845 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5846 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5847 }
5848 }
5849 else if (!CONST_DOUBLE_P (value))
5850 {
5851 if (WORDS_BIG_ENDIAN)
5852 {
5853 *first = const0_rtx;
5854 *second = value;
5855 }
5856 else
5857 {
5858 *first = value;
5859 *second = const0_rtx;
5860 }
5861 }
5862 else if (GET_MODE (value) == VOIDmode
5863 /* This is the old way we did CONST_DOUBLE integers. */
5864 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5865 {
5866 /* In an integer, the words are defined as most and least significant.
5867 So order them by the target's convention. */
5868 if (WORDS_BIG_ENDIAN)
5869 {
5870 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5871 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5872 }
5873 else
5874 {
5875 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5876 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5877 }
5878 }
5879 else
5880 {
5881 long l[2];
5882
5883 /* Note, this converts the REAL_VALUE_TYPE to the target's
5884 format, splits up the floating point double and outputs
5885 exactly 32 bits of it into each of l[0] and l[1] --
5886 not necessarily BITS_PER_WORD bits. */
5887 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
5888
5889 /* If 32 bits is an entire word for the target, but not for the host,
5890 then sign-extend on the host so that the number will look the same
5891 way on the host that it would on the target. See for instance
5892 simplify_unary_operation. The #if is needed to avoid compiler
5893 warnings. */
5894
5895 #if HOST_BITS_PER_LONG > 32
5896 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5897 {
5898 if (l[0] & ((long) 1 << 31))
5899 l[0] |= ((unsigned long) (-1) << 32);
5900 if (l[1] & ((long) 1 << 31))
5901 l[1] |= ((unsigned long) (-1) << 32);
5902 }
5903 #endif
5904
5905 *first = GEN_INT (l[0]);
5906 *second = GEN_INT (l[1]);
5907 }
5908 }
5909
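/* Illustrative sketch, not part of the original file: splitting a
   64-bit CONST_INT into its two target words.  The function name is
   hypothetical; the checked result assumes BITS_PER_WORD == 32 and
   !WORDS_BIG_ENDIAN, so the check is guarded accordingly.  */

static void ATTRIBUTE_UNUSED
example_split_double (void)
{
  rtx lo, hi;
  split_double (GEN_INT (HOST_WIDE_INT_C (0x100000002)), &lo, &hi);
  /* On a 32-bit-word, little-endian-word target, *FIRST receives the
     low word and *SECOND the high word.  */
  if (BITS_PER_WORD == 32 && !WORDS_BIG_ENDIAN)
    gcc_checking_assert (INTVAL (lo) == 2 && INTVAL (hi) == 1);
}
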
5910 /* Return true if X is a sign_extract or zero_extract from the least
5911 significant bit. */
5912
5913 static bool
5914 lsb_bitfield_op_p (rtx x)
5915 {
5916 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5917 {
5918 machine_mode mode = GET_MODE (XEXP (x, 0));
5919 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5920 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5921
5922 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5923 }
5924 return false;
5925 }
5926
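/* Illustrative sketch, not part of the original file: a zero_extract
   of the low 8 bits of a register.  The function name and pseudo
   register number are hypothetical.  */

static void ATTRIBUTE_UNUSED
example_lsb_bitfield_op_p (void)
{
  /* (zero_extract:SI (reg:SI 106) (const_int 8) (const_int 0)).  */
  rtx x = gen_rtx_ZERO_EXTRACT (SImode, gen_rtx_REG (SImode, 106),
				GEN_INT (8), const0_rtx);
  /* Bit position 0 names the least significant bit only on
     !BITS_BIG_ENDIAN targets, so the predicate tracks that macro.  */
  gcc_checking_assert (lsb_bitfield_op_p (x) == !BITS_BIG_ENDIAN);
}
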
5927 /* Strip outer address "mutations" from LOC and return a pointer to the
5928 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5929 stripped expression there.
5930
5931 "Mutations" either convert between modes or apply some kind of
5932 extension, truncation or alignment. */
5933
5934 rtx *
5935 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5936 {
5937 for (;;)
5938 {
5939 enum rtx_code code = GET_CODE (*loc);
5940 if (GET_RTX_CLASS (code) == RTX_UNARY)
5941 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5942 used to convert between pointer sizes. */
5943 loc = &XEXP (*loc, 0);
5944 else if (lsb_bitfield_op_p (*loc))
5945 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5946 acts as a combined truncation and extension. */
5947 loc = &XEXP (*loc, 0);
5948 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5949 /* (and ... (const_int -X)) is used to align to X bytes. */
5950 loc = &XEXP (*loc, 0);
5951 else if (code == SUBREG
5952 && !OBJECT_P (SUBREG_REG (*loc))
5953 && subreg_lowpart_p (*loc))
5954 /* A lowpart (subreg (operator ...) ...) is also used for mode
5955 conversion. */
5956 loc = &SUBREG_REG (*loc);
5957 else
5958 return loc;
5959 if (outer_code)
5960 *outer_code = code;
5961 }
5962 }
5963
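/* Illustrative sketch, not part of the original file: stripping an
   alignment "mutation" from an address.  The function name and pseudo
   register number are hypothetical.  */

static void ATTRIBUTE_UNUSED
example_strip_address_mutations (void)
{
  /* (and:SI (reg:SI 100) (const_int -4)) aligns the register down to
     a 4-byte boundary.  */
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx addr = gen_rtx_AND (SImode, reg, GEN_INT (-4));
  enum rtx_code outer = UNKNOWN;
  rtx *inner = strip_address_mutations (&addr, &outer);
  /* The AND is stripped: INNER points at the register and OUTER
     records the innermost stripped code.  */
  gcc_checking_assert (*inner == reg && outer == AND);
}
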
5964 /* Return true if CODE applies some kind of scale. The scaled value
5965 is the first operand and the scale is the second. */
5966
5967 static bool
5968 binary_scale_code_p (enum rtx_code code)
5969 {
5970 return (code == MULT
5971 || code == ASHIFT
5972 /* Needed by ARM targets. */
5973 || code == ASHIFTRT
5974 || code == LSHIFTRT
5975 || code == ROTATE
5976 || code == ROTATERT);
5977 }
5978
5979 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5980 (see address_info). Return null otherwise. */
5981
5982 static rtx *
5983 get_base_term (rtx *inner)
5984 {
5985 if (GET_CODE (*inner) == LO_SUM)
5986 inner = strip_address_mutations (&XEXP (*inner, 0));
5987 if (REG_P (*inner)
5988 || MEM_P (*inner)
5989 || GET_CODE (*inner) == SUBREG
5990 || GET_CODE (*inner) == SCRATCH)
5991 return inner;
5992 return 0;
5993 }
5994
5995 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5996 (see address_info). Return null otherwise. */
5997
5998 static rtx *
5999 get_index_term (rtx *inner)
6000 {
6001 /* At present, only constant scales are allowed. */
6002 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6003 inner = strip_address_mutations (&XEXP (*inner, 0));
6004 if (REG_P (*inner)
6005 || MEM_P (*inner)
6006 || GET_CODE (*inner) == SUBREG
6007 || GET_CODE (*inner) == SCRATCH)
6008 return inner;
6009 return 0;
6010 }
6011
6012 /* Set the segment part of address INFO to LOC, given that INNER is the
6013 unmutated value. */
6014
6015 static void
6016 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6017 {
6018 gcc_assert (!info->segment);
6019 info->segment = loc;
6020 info->segment_term = inner;
6021 }
6022
6023 /* Set the base part of address INFO to LOC, given that INNER is the
6024 unmutated value. */
6025
6026 static void
6027 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6028 {
6029 gcc_assert (!info->base);
6030 info->base = loc;
6031 info->base_term = inner;
6032 }
6033
6034 /* Set the index part of address INFO to LOC, given that INNER is the
6035 unmutated value. */
6036
6037 static void
6038 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6039 {
6040 gcc_assert (!info->index);
6041 info->index = loc;
6042 info->index_term = inner;
6043 }
6044
6045 /* Set the displacement part of address INFO to LOC, given that INNER
6046 is the constant term. */
6047
6048 static void
6049 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6050 {
6051 gcc_assert (!info->disp);
6052 info->disp = loc;
6053 info->disp_term = inner;
6054 }
6055
6056 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6057 rest of INFO accordingly. */
6058
6059 static void
6060 decompose_incdec_address (struct address_info *info)
6061 {
6062 info->autoinc_p = true;
6063
6064 rtx *base = &XEXP (*info->inner, 0);
6065 set_address_base (info, base, base);
6066 gcc_checking_assert (info->base == info->base_term);
6067
6068 /* These addresses are only valid when the size of the addressed
6069 value is known. */
6070 gcc_checking_assert (info->mode != VOIDmode);
6071 }
6072
6073 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6074 of INFO accordingly. */
6075
6076 static void
6077 decompose_automod_address (struct address_info *info)
6078 {
6079 info->autoinc_p = true;
6080
6081 rtx *base = &XEXP (*info->inner, 0);
6082 set_address_base (info, base, base);
6083 gcc_checking_assert (info->base == info->base_term);
6084
6085 rtx plus = XEXP (*info->inner, 1);
6086 gcc_assert (GET_CODE (plus) == PLUS);
6087
6088 info->base_term2 = &XEXP (plus, 0);
6089 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6090
6091 rtx *step = &XEXP (plus, 1);
6092 rtx *inner_step = strip_address_mutations (step);
6093 if (CONSTANT_P (*inner_step))
6094 set_address_disp (info, step, inner_step);
6095 else
6096 set_address_index (info, step, inner_step);
6097 }
6098
6099 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6100 values in [PTR, END). Return a pointer to the end of the used array. */
6101
6102 static rtx **
6103 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6104 {
6105 rtx x = *loc;
6106 if (GET_CODE (x) == PLUS)
6107 {
6108 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6109 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6110 }
6111 else
6112 {
6113 gcc_assert (ptr != end);
6114 *ptr++ = loc;
6115 }
6116 return ptr;
6117 }
6118
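/* Illustrative sketch, not part of the original file: flattening a
   nested PLUS.  The function name and pseudo register numbers are
   hypothetical; Pmode is the target's pointer mode.  */

static void ATTRIBUTE_UNUSED
example_extract_plus_operands (void)
{
  /* (plus (plus (reg 104) (reg 105)) (const_int 8)) yields three
     operand pointers: the two registers and the constant.  */
  rtx a = gen_rtx_REG (Pmode, 104);
  rtx b = gen_rtx_REG (Pmode, 105);
  rtx sum = gen_rtx_PLUS (Pmode, gen_rtx_PLUS (Pmode, a, b), GEN_INT (8));
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (&sum, ops, ops + 4) - ops;
  gcc_checking_assert (n_ops == 3 && *ops[0] == a && *ops[1] == b);
}
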
6119 /* Evaluate the likelihood of X being a base or index value, returning
6120 positive if it is likely to be a base, negative if it is likely to be
6121 an index, and 0 if we can't tell. Make the magnitude of the return
6122 value reflect the amount of confidence we have in the answer.
6123
6124 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6125
6126 static int
6127 baseness (rtx x, machine_mode mode, addr_space_t as,
6128 enum rtx_code outer_code, enum rtx_code index_code)
6129 {
6130 /* Believe *_POINTER unless the address shape requires otherwise. */
6131 if (REG_P (x) && REG_POINTER (x))
6132 return 2;
6133 if (MEM_P (x) && MEM_POINTER (x))
6134 return 2;
6135
6136 if (REG_P (x) && HARD_REGISTER_P (x))
6137 {
6138 /* X is a hard register. If it only fits one of the base
6139 or index classes, choose that interpretation. */
6140 int regno = REGNO (x);
6141 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6142 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6143 if (base_p != index_p)
6144 return base_p ? 1 : -1;
6145 }
6146 return 0;
6147 }
6148
6149 /* INFO->INNER describes a normal, non-automodified address.
6150 Fill in the rest of INFO accordingly. */
6151
6152 static void
6153 decompose_normal_address (struct address_info *info)
6154 {
6155 /* Treat the address as the sum of up to four values. */
6156 rtx *ops[4];
6157 size_t n_ops = extract_plus_operands (info->inner, ops,
6158 ops + ARRAY_SIZE (ops)) - ops;
6159
6160 /* If there is more than one component, any base component is in a PLUS. */
6161 if (n_ops > 1)
6162 info->base_outer_code = PLUS;
6163
6164 /* Try to classify each sum operand now. Leave those that could be
6165 either a base or an index in OPS. */
6166 rtx *inner_ops[4];
6167 size_t out = 0;
6168 for (size_t in = 0; in < n_ops; ++in)
6169 {
6170 rtx *loc = ops[in];
6171 rtx *inner = strip_address_mutations (loc);
6172 if (CONSTANT_P (*inner))
6173 set_address_disp (info, loc, inner);
6174 else if (GET_CODE (*inner) == UNSPEC)
6175 set_address_segment (info, loc, inner);
6176 else
6177 {
6178 /* The only other possibilities are a base or an index. */
6179 rtx *base_term = get_base_term (inner);
6180 rtx *index_term = get_index_term (inner);
6181 gcc_assert (base_term || index_term);
6182 if (!base_term)
6183 set_address_index (info, loc, index_term);
6184 else if (!index_term)
6185 set_address_base (info, loc, base_term);
6186 else
6187 {
6188 gcc_assert (base_term == index_term);
6189 ops[out] = loc;
6190 inner_ops[out] = base_term;
6191 ++out;
6192 }
6193 }
6194 }
6195
6196 /* Classify the remaining OPS members as bases and indexes. */
6197 if (out == 1)
6198 {
6199 /* If we haven't seen a base or an index yet, assume that this is
6200 the base. If we were confident that another term was the base
6201 or index, treat the remaining operand as the other kind. */
6202 if (!info->base)
6203 set_address_base (info, ops[0], inner_ops[0]);
6204 else
6205 set_address_index (info, ops[0], inner_ops[0]);
6206 }
6207 else if (out == 2)
6208 {
6209 /* In the event of a tie, assume the base comes first. */
6210 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6211 GET_CODE (*ops[1]))
6212 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6213 GET_CODE (*ops[0])))
6214 {
6215 set_address_base (info, ops[0], inner_ops[0]);
6216 set_address_index (info, ops[1], inner_ops[1]);
6217 }
6218 else
6219 {
6220 set_address_base (info, ops[1], inner_ops[1]);
6221 set_address_index (info, ops[0], inner_ops[0]);
6222 }
6223 }
6224 else
6225 gcc_assert (out == 0);
6226 }
6227
6228 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6229 or VOIDmode if not known. AS is the address space associated with LOC.
6230 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6231
6232 void
6233 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6234 addr_space_t as, enum rtx_code outer_code)
6235 {
6236 memset (info, 0, sizeof (*info));
6237 info->mode = mode;
6238 info->as = as;
6239 info->addr_outer_code = outer_code;
6240 info->outer = loc;
6241 info->inner = strip_address_mutations (loc, &outer_code);
6242 info->base_outer_code = outer_code;
6243 switch (GET_CODE (*info->inner))
6244 {
6245 case PRE_DEC:
6246 case PRE_INC:
6247 case POST_DEC:
6248 case POST_INC:
6249 decompose_incdec_address (info);
6250 break;
6251
6252 case PRE_MODIFY:
6253 case POST_MODIFY:
6254 decompose_automod_address (info);
6255 break;
6256
6257 default:
6258 decompose_normal_address (info);
6259 break;
6260 }
6261 }
6262
6263 /* Describe address operand LOC in INFO. */
6264
6265 void
6266 decompose_lea_address (struct address_info *info, rtx *loc)
6267 {
6268 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6269 }
6270
6271 /* Describe the address of MEM X in INFO. */
6272
6273 void
6274 decompose_mem_address (struct address_info *info, rtx x)
6275 {
6276 gcc_assert (MEM_P (x));
6277 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6278 MEM_ADDR_SPACE (x), MEM);
6279 }
6280
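/* Illustrative sketch, not part of the original file: decomposing a
   simple base-plus-displacement memory address.  The function name and
   pseudo register number are hypothetical.  */

static void ATTRIBUTE_UNUSED
example_decompose_mem_address (void)
{
  /* (mem:SI (plus (reg) (const_int 16))): the register is classified
     as the base and the constant as the displacement.  */
  rtx reg = gen_rtx_REG (Pmode, 101);
  rtx mem = gen_rtx_MEM (SImode, gen_rtx_PLUS (Pmode, reg, GEN_INT (16)));
  struct address_info info;
  decompose_mem_address (&info, mem);
  gcc_checking_assert (*info.base_term == reg
		       && INTVAL (*info.disp_term) == 16);
}
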
6281 /* Update INFO after a change to the address it describes. */
6282
6283 void
6284 update_address (struct address_info *info)
6285 {
6286 decompose_address (info, info->outer, info->mode, info->as,
6287 info->addr_outer_code);
6288 }
6289
6290 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6291 more complicated than that. */
6292
6293 HOST_WIDE_INT
6294 get_index_scale (const struct address_info *info)
6295 {
6296 rtx index = *info->index;
6297 if (GET_CODE (index) == MULT
6298 && CONST_INT_P (XEXP (index, 1))
6299 && info->index_term == &XEXP (index, 0))
6300 return INTVAL (XEXP (index, 1));
6301
6302 if (GET_CODE (index) == ASHIFT
6303 && CONST_INT_P (XEXP (index, 1))
6304 && info->index_term == &XEXP (index, 0))
6305 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6306
6307 if (info->index == info->index_term)
6308 return 1;
6309
6310 return 0;
6311 }
6312
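/* Illustrative sketch, not part of the original file: reading the scale
   back out of a scaled-index address.  The function name and pseudo
   register numbers are hypothetical.  */

static void ATTRIBUTE_UNUSED
example_get_index_scale (void)
{
  /* (plus (mult (reg 102) (const_int 4)) (reg 103)): the MULT operand
     becomes the index and its scale is 4.  */
  rtx index = gen_rtx_MULT (Pmode, gen_rtx_REG (Pmode, 102), GEN_INT (4));
  rtx addr = gen_rtx_PLUS (Pmode, index, gen_rtx_REG (Pmode, 103));
  struct address_info info;
  decompose_lea_address (&info, &addr);
  gcc_checking_assert (get_index_scale (&info) == 4);
}
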
6313 /* Return the "index code" of INFO, in the form required by
6314 ok_for_base_p_1. */
6315
6316 enum rtx_code
6317 get_index_code (const struct address_info *info)
6318 {
6319 if (info->index)
6320 return GET_CODE (*info->index);
6321
6322 if (info->disp)
6323 return GET_CODE (*info->disp);
6324
6325 return SCRATCH;
6326 }
6327
6328 /* Return true if RTL X contains a SYMBOL_REF. */
6329
6330 bool
6331 contains_symbol_ref_p (const_rtx x)
6332 {
6333 subrtx_iterator::array_type array;
6334 FOR_EACH_SUBRTX (iter, array, x, ALL)
6335 if (SYMBOL_REF_P (*iter))
6336 return true;
6337
6338 return false;
6339 }
6340
6341 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6342
6343 bool
6344 contains_symbolic_reference_p (const_rtx x)
6345 {
6346 subrtx_iterator::array_type array;
6347 FOR_EACH_SUBRTX (iter, array, x, ALL)
6348 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6349 return true;
6350
6351 return false;
6352 }
6353
6354 /* Return true if X contains a thread-local symbol. */
6355
6356 bool
6357 tls_referenced_p (const_rtx x)
6358 {
6359 if (!targetm.have_tls)
6360 return false;
6361
6362 subrtx_iterator::array_type array;
6363 FOR_EACH_SUBRTX (iter, array, x, ALL)
6364 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6365 return true;
6366 return false;
6367 }
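
/* Illustrative sketch, not part of the original file: the same
   FOR_EACH_SUBRTX pattern used by the predicates above, applied to a
   different (hypothetical) query -- whether X contains any MEM.  */

static bool ATTRIBUTE_UNUSED
example_contains_mem_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;
  return false;
}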