1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38
39 /* Forward declarations */
40 static void set_of_1 (rtx, const_rtx, void *);
41 static bool covers_regno_p (const_rtx, unsigned int);
42 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
43 static int computed_jump_p_1 (const_rtx);
44 static void parms_set (rtx, const_rtx, void *);
45
46 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
47 const_rtx, machine_mode,
48 unsigned HOST_WIDE_INT);
49 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
50 const_rtx, machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
53 machine_mode,
54 unsigned int);
55 static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
56 machine_mode, unsigned int);
57
58 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
59 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
60
61 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
62 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
63 SIGN_EXTEND then while narrowing we also have to enforce the
64 representation and sign-extend the value to mode DESTINATION_REP.
65
66 If the value is already sign-extended to DESTINATION_REP mode we
67 can just switch to DESTINATION mode on it. For each pair of
68 integral modes SOURCE and DESTINATION, when truncating from SOURCE
69 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
70 contains the number of high-order bits in SOURCE that have to be
71 copies of the sign-bit so that we can do this mode-switch to
72 DESTINATION. */
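
/* For example, on a target where SImode values are kept sign-extended
   in DImode registers (TARGET_MODE_REP_EXTENDED returning SIGN_EXTEND
   for that pair), a DImode value can be switched to SImode only when
   its high 32 bits and the SImode sign bit all agree, i.e. when it
   has at least 33 sign-bit copies.  */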
73
74 static unsigned int
75 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
76 \f
77 /* Store X into index I of ARRAY. ARRAY is known to have at least I
78 elements. Return the new base of ARRAY. */
79
80 template <typename T>
81 typename T::value_type *
82 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
83 value_type *base,
84 size_t i, value_type x)
85 {
86 if (base == array.stack)
87 {
88 if (i < LOCAL_ELEMS)
89 {
90 base[i] = x;
91 return base;
92 }
93 gcc_checking_assert (i == LOCAL_ELEMS);
94 /* A previous iteration might also have moved from the stack to the
95 heap, in which case the heap array will already be big enough. */
96 if (vec_safe_length (array.heap) <= i)
97 vec_safe_grow (array.heap, i + 1);
98 base = array.heap->address ();
99 memcpy (base, array.stack, sizeof (array.stack));
100 base[LOCAL_ELEMS] = x;
101 return base;
102 }
103 unsigned int length = array.heap->length ();
104 if (length > i)
105 {
106 gcc_checking_assert (base == array.heap->address ());
107 base[i] = x;
108 return base;
109 }
110 else
111 {
112 gcc_checking_assert (i == length);
113 vec_safe_push (array.heap, x);
114 return array.heap->address ();
115 }
116 }
117
118 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
119 number of elements added to the worklist. */
120
121 template <typename T>
122 size_t
123 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
124 value_type *base,
125 size_t end, rtx_type x)
126 {
127 enum rtx_code code = GET_CODE (x);
128 const char *format = GET_RTX_FORMAT (code);
129 size_t orig_end = end;
130 if (__builtin_expect (INSN_P (x), false))
131 {
132 /* Put the pattern at the top of the queue, since that's what
133 we're likely to want most. It also allows for the SEQUENCE
134 code below. */
135 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
136 if (format[i] == 'e')
137 {
138 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
139 if (__builtin_expect (end < LOCAL_ELEMS, true))
140 base[end++] = subx;
141 else
142 base = add_single_to_queue (array, base, end++, subx);
143 }
144 }
145 else
146 for (int i = 0; format[i]; ++i)
147 if (format[i] == 'e')
148 {
149 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
150 if (__builtin_expect (end < LOCAL_ELEMS, true))
151 base[end++] = subx;
152 else
153 base = add_single_to_queue (array, base, end++, subx);
154 }
155 else if (format[i] == 'E')
156 {
157 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
158 rtx *vec = x->u.fld[i].rt_rtvec->elem;
159 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
160 for (unsigned int j = 0; j < length; j++)
161 base[end++] = T::get_value (vec[j]);
162 else
163 for (unsigned int j = 0; j < length; j++)
164 base = add_single_to_queue (array, base, end++,
165 T::get_value (vec[j]));
166 if (code == SEQUENCE && end == length)
167 /* If the subrtxes of the sequence fill the entire array then
168 we know that no other parts of a containing insn are queued.
169 The caller is therefore iterating over the sequence as a
170 PATTERN (...), so we also want the patterns of the
171 subinstructions. */
172 for (unsigned int j = 0; j < length; j++)
173 {
174 typename T::rtx_type x = T::get_rtx (base[j]);
175 if (INSN_P (x))
176 base[j] = T::get_value (PATTERN (x));
177 }
178 }
179 return end - orig_end;
180 }
181
182 template <typename T>
183 void
184 generic_subrtx_iterator <T>::free_array (array_type &array)
185 {
186 vec_free (array.heap);
187 }
188
189 template <typename T>
190 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
191
192 template class generic_subrtx_iterator <const_rtx_accessor>;
193 template class generic_subrtx_iterator <rtx_var_accessor>;
194 template class generic_subrtx_iterator <rtx_ptr_accessor>;
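
/* A typical use of these iterators, as in find_all_hard_regs below:

       subrtx_iterator::array_type array;
       FOR_EACH_SUBRTX (iter, array, x, NONCONST)
         if (REG_P (*iter))
           ... use *iter ...

   ARRAY provides the iterator's worklist storage.  */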
195
196 /* Return 1 if the value of X is unstable
197 (would be different at a different point in the program).
198 The frame pointer, arg pointer, etc. are considered stable
199 (within one function) and so is anything marked `unchanging'. */
200
201 int
202 rtx_unstable_p (const_rtx x)
203 {
204 const RTX_CODE code = GET_CODE (x);
205 int i;
206 const char *fmt;
207
208 switch (code)
209 {
210 case MEM:
211 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
212
213 case CONST:
214 CASE_CONST_ANY:
215 case SYMBOL_REF:
216 case LABEL_REF:
217 return 0;
218
219 case REG:
220 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
221 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
222 /* The arg pointer varies if it is not a fixed register. */
223 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
224 return 0;
225 /* ??? When call-clobbered, the value is stable modulo the restore
226 that must happen after a call. This currently screws up local-alloc
227 into believing that the restore is not needed. */
228 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
229 return 0;
230 return 1;
231
232 case ASM_OPERANDS:
233 if (MEM_VOLATILE_P (x))
234 return 1;
235
236 /* Fall through. */
237
238 default:
239 break;
240 }
241
242 fmt = GET_RTX_FORMAT (code);
243 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
244 if (fmt[i] == 'e')
245 {
246 if (rtx_unstable_p (XEXP (x, i)))
247 return 1;
248 }
249 else if (fmt[i] == 'E')
250 {
251 int j;
252 for (j = 0; j < XVECLEN (x, i); j++)
253 if (rtx_unstable_p (XVECEXP (x, i, j)))
254 return 1;
255 }
256
257 return 0;
258 }
259
260 /* Return 1 if X has a value that can vary even between two
261 executions of the program. 0 means X can be compared reliably
262 against certain constants or near-constants.
263 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
264 zero, we are slightly more conservative.
265 The frame pointer and the arg pointer are considered constant. */
266
267 bool
268 rtx_varies_p (const_rtx x, bool for_alias)
269 {
270 RTX_CODE code;
271 int i;
272 const char *fmt;
273
274 if (!x)
275 return 0;
276
277 code = GET_CODE (x);
278 switch (code)
279 {
280 case MEM:
281 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
282
283 case CONST:
284 CASE_CONST_ANY:
285 case SYMBOL_REF:
286 case LABEL_REF:
287 return 0;
288
289 case REG:
290 /* Note that we have to test for the actual rtx used for the frame
291 and arg pointers and not just the register number in case we have
292 eliminated the frame and/or arg pointer and are using it
293 for pseudos. */
294 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
295 /* The arg pointer varies if it is not a fixed register. */
296 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
297 return 0;
298 if (x == pic_offset_table_rtx
299 /* ??? When call-clobbered, the value is stable modulo the restore
300 that must happen after a call. This currently screws up
301 local-alloc into believing that the restore is not needed, so we
302 must return 0 only if we are called from alias analysis. */
303 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
304 return 0;
305 return 1;
306
307 case LO_SUM:
308 /* The operand 0 of a LO_SUM is considered constant
309 (in fact it is related specifically to operand 1)
310 during alias analysis. */
311 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
312 || rtx_varies_p (XEXP (x, 1), for_alias);
313
314 case ASM_OPERANDS:
315 if (MEM_VOLATILE_P (x))
316 return 1;
317
318 /* Fall through. */
319
320 default:
321 break;
322 }
323
324 fmt = GET_RTX_FORMAT (code);
325 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
326 if (fmt[i] == 'e')
327 {
328 if (rtx_varies_p (XEXP (x, i), for_alias))
329 return 1;
330 }
331 else if (fmt[i] == 'E')
332 {
333 int j;
334 for (j = 0; j < XVECLEN (x, i); j++)
335 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
336 return 1;
337 }
338
339 return 0;
340 }
341
342 /* Compute an approximation for the offset between registers
343 FROM and TO in the current function, as it was at the start
344 of the routine. */
345
346 static HOST_WIDE_INT
347 get_initial_register_offset (int from, int to)
348 {
349 static const struct elim_table_t
350 {
351 const int from;
352 const int to;
353 } table[] = ELIMINABLE_REGS;
354 HOST_WIDE_INT offset1, offset2;
355 unsigned int i, j;
356
357 if (to == from)
358 return 0;
359
360 /* It is not safe to call INITIAL_ELIMINATION_OFFSET
361 before the reload pass. We need to give at least
362 an estimate of the resulting frame size. */
363 if (! reload_completed)
364 {
365 offset1 = crtl->outgoing_args_size + get_frame_size ();
366 #if !STACK_GROWS_DOWNWARD
367 offset1 = - offset1;
368 #endif
369 if (to == STACK_POINTER_REGNUM)
370 return offset1;
371 else if (from == STACK_POINTER_REGNUM)
372 return - offset1;
373 else
374 return 0;
375 }
376
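  /* Look for a direct elimination entry FROM -> TO, or compose two
     entries that share an endpoint: FROM -> X followed by X -> TO
     gives offset1 + offset2, while FROM -> X and TO -> X give
     offset1 - offset2; the else branch below handles the mirrored
     cases where the table entry runs X -> FROM.  */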
377 for (i = 0; i < ARRAY_SIZE (table); i++)
378 if (table[i].from == from)
379 {
380 if (table[i].to == to)
381 {
382 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
383 offset1);
384 return offset1;
385 }
386 for (j = 0; j < ARRAY_SIZE (table); j++)
387 {
388 if (table[j].to == to
389 && table[j].from == table[i].to)
390 {
391 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
392 offset1);
393 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
394 offset2);
395 return offset1 + offset2;
396 }
397 if (table[j].from == to
398 && table[j].to == table[i].to)
399 {
400 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
401 offset1);
402 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
403 offset2);
404 return offset1 - offset2;
405 }
406 }
407 }
408 else if (table[i].to == from)
409 {
410 if (table[i].from == to)
411 {
412 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
413 offset1);
414 return - offset1;
415 }
416 for (j = 0; j < ARRAY_SIZE (table); j++)
417 {
418 if (table[j].to == to
419 && table[j].from == table[i].from)
420 {
421 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
422 offset1);
423 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
424 offset2);
425 return - offset1 + offset2;
426 }
427 if (table[j].from == to
428 && table[j].to == table[i].from)
429 {
430 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
431 offset1);
432 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
433 offset2);
434 return - offset1 - offset2;
435 }
436 }
437 }
438
439 /* If the requested register combination was not found,
440 try a simpler combination. */
441 if (from == ARG_POINTER_REGNUM)
442 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
443 else if (to == ARG_POINTER_REGNUM)
444 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
445 else if (from == HARD_FRAME_POINTER_REGNUM)
446 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
447 else if (to == HARD_FRAME_POINTER_REGNUM)
448 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
449 else
450 return 0;
451 }
452
453 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
454 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
455 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
456 references on strict alignment machines. */
457
458 static int
459 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
460 machine_mode mode, bool unaligned_mems)
461 {
462 enum rtx_code code = GET_CODE (x);
463
464 /* The offset must be a multiple of the mode size if we are considering
465 unaligned memory references on strict alignment machines. */
466 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
467 {
468 HOST_WIDE_INT actual_offset = offset;
469
470 #ifdef SPARC_STACK_BOUNDARY_HACK
471 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
472 the real alignment of %sp. However, when it does this, the
473 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
474 if (SPARC_STACK_BOUNDARY_HACK
475 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
476 actual_offset -= STACK_POINTER_OFFSET;
477 #endif
478
479 if (actual_offset % GET_MODE_SIZE (mode) != 0)
480 return 1;
481 }
482
483 switch (code)
484 {
485 case SYMBOL_REF:
486 if (SYMBOL_REF_WEAK (x))
487 return 1;
488 if (!CONSTANT_POOL_ADDRESS_P (x))
489 {
490 tree decl;
491 HOST_WIDE_INT decl_size;
492
493 if (offset < 0)
494 return 1;
495 if (size == 0)
496 size = GET_MODE_SIZE (mode);
497 if (size == 0)
498 return offset != 0;
499
500 /* If the size of the access or of the symbol is unknown,
501 assume the worst. */
502 decl = SYMBOL_REF_DECL (x);
503
504 /* Else check that the access is in bounds. TODO: restructure
505 expr_size/tree_expr_size/int_expr_size and just use the latter. */
506 if (!decl)
507 decl_size = -1;
508 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
509 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
510 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
511 : -1);
512 else if (TREE_CODE (decl) == STRING_CST)
513 decl_size = TREE_STRING_LENGTH (decl);
514 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
515 decl_size = int_size_in_bytes (TREE_TYPE (decl));
516 else
517 decl_size = -1;
518
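	  /* A nonpositive DECL_SIZE here means the object's size is
	     unknown, in which case only offset 0 is assumed safe;
	     otherwise the whole access [offset, offset + size) must
	     lie within the object.  */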
519 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
520 }
521
522 return 0;
523
524 case LABEL_REF:
525 return 0;
526
527 case REG:
528 /* Stack references are assumed not to trap, but we need to deal with
529 nonsensical offsets. */
530 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
531 || x == stack_pointer_rtx
532 /* The arg pointer varies if it is not a fixed register. */
533 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
534 {
535 #ifdef RED_ZONE_SIZE
536 HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
537 #else
538 HOST_WIDE_INT red_zone_size = 0;
539 #endif
540 HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
541 / BITS_PER_UNIT;
542 HOST_WIDE_INT low_bound, high_bound;
543
544 if (size == 0)
545 size = GET_MODE_SIZE (mode);
546
547 if (x == frame_pointer_rtx)
548 {
549 if (FRAME_GROWS_DOWNWARD)
550 {
551 high_bound = STARTING_FRAME_OFFSET;
552 low_bound = high_bound - get_frame_size ();
553 }
554 else
555 {
556 low_bound = STARTING_FRAME_OFFSET;
557 high_bound = low_bound + get_frame_size ();
558 }
559 }
560 else if (x == hard_frame_pointer_rtx)
561 {
562 HOST_WIDE_INT sp_offset
563 = get_initial_register_offset (STACK_POINTER_REGNUM,
564 HARD_FRAME_POINTER_REGNUM);
565 HOST_WIDE_INT ap_offset
566 = get_initial_register_offset (ARG_POINTER_REGNUM,
567 HARD_FRAME_POINTER_REGNUM);
568
569 #if STACK_GROWS_DOWNWARD
570 low_bound = sp_offset - red_zone_size - stack_boundary;
571 high_bound = ap_offset
572 + FIRST_PARM_OFFSET (current_function_decl)
573 #if !ARGS_GROW_DOWNWARD
574 + crtl->args.size
575 #endif
576 + stack_boundary;
577 #else
578 high_bound = sp_offset + red_zone_size + stack_boundary;
579 low_bound = ap_offset
580 + FIRST_PARM_OFFSET (current_function_decl)
581 #if ARGS_GROW_DOWNWARD
582 - crtl->args.size
583 #endif
584 - stack_boundary;
585 #endif
586 }
587 else if (x == stack_pointer_rtx)
588 {
589 HOST_WIDE_INT ap_offset
590 = get_initial_register_offset (ARG_POINTER_REGNUM,
591 STACK_POINTER_REGNUM);
592
593 #if STACK_GROWS_DOWNWARD
594 low_bound = - red_zone_size - stack_boundary;
595 high_bound = ap_offset
596 + FIRST_PARM_OFFSET (current_function_decl)
597 #if !ARGS_GROW_DOWNWARD
598 + crtl->args.size
599 #endif
600 + stack_boundary;
601 #else
602 high_bound = red_zone_size + stack_boundary;
603 low_bound = ap_offset
604 + FIRST_PARM_OFFSET (current_function_decl)
605 #if ARGS_GROW_DOWNWARD
606 - crtl->args.size
607 #endif
608 - stack_boundary;
609 #endif
610 }
611 else
612 {
613 /* We assume that accesses are safe to at least the
614 next stack boundary.
615 Examples are varargs and __builtin_return_address. */
616 #if ARGS_GROW_DOWNWARD
617 high_bound = FIRST_PARM_OFFSET (current_function_decl)
618 + stack_boundary;
619 low_bound = FIRST_PARM_OFFSET (current_function_decl)
620 - crtl->args.size - stack_boundary;
621 #else
622 low_bound = FIRST_PARM_OFFSET (current_function_decl)
623 - stack_boundary;
624 high_bound = FIRST_PARM_OFFSET (current_function_decl)
625 + crtl->args.size + stack_boundary;
626 #endif
627 }
628
629 if (offset >= low_bound && offset <= high_bound - size)
630 return 0;
631 return 1;
632 }
633 /* All of the virtual frame registers are stack references. */
634 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
635 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
636 return 0;
637 return 1;
638
639 case CONST:
640 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
641 mode, unaligned_mems);
642
643 case PLUS:
644 /* An address is assumed not to trap if:
645 - it is the pic register plus a constant. */
646 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
647 return 0;
648
649 /* - or it is an address that can't trap plus a constant integer. */
650 if (CONST_INT_P (XEXP (x, 1))
651 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
652 size, mode, unaligned_mems))
653 return 0;
654
655 return 1;
656
657 case LO_SUM:
658 case PRE_MODIFY:
659 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
660 mode, unaligned_mems);
661
662 case PRE_DEC:
663 case PRE_INC:
664 case POST_DEC:
665 case POST_INC:
666 case POST_MODIFY:
667 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
668 mode, unaligned_mems);
669
670 default:
671 break;
672 }
673
674 /* If it isn't one of the cases above, it can cause a trap. */
675 return 1;
676 }
677
678 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
679
680 int
681 rtx_addr_can_trap_p (const_rtx x)
682 {
683 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
684 }
685
686 /* Return true if X is an address that is known to not be zero. */
687
688 bool
689 nonzero_address_p (const_rtx x)
690 {
691 const enum rtx_code code = GET_CODE (x);
692
693 switch (code)
694 {
695 case SYMBOL_REF:
696 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
697
698 case LABEL_REF:
699 return true;
700
701 case REG:
702 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
703 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
704 || x == stack_pointer_rtx
705 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
706 return true;
707 /* All of the virtual frame registers are stack references. */
708 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
709 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
710 return true;
711 return false;
712
713 case CONST:
714 return nonzero_address_p (XEXP (x, 0));
715
716 case PLUS:
717 /* Handle PIC references. */
718 if (XEXP (x, 0) == pic_offset_table_rtx
719 && CONSTANT_P (XEXP (x, 1)))
720 return true;
721 return false;
722
723 case PRE_MODIFY:
724 /* Similar to the above; allow positive offsets. Further, since
725 auto-inc is only allowed in memories, the register must be a
726 pointer. */
727 if (CONST_INT_P (XEXP (x, 1))
728 && INTVAL (XEXP (x, 1)) > 0)
729 return true;
730 return nonzero_address_p (XEXP (x, 0));
731
732 case PRE_INC:
733 /* Similarly. Further, the offset is always positive. */
734 return true;
735
736 case PRE_DEC:
737 case POST_DEC:
738 case POST_INC:
739 case POST_MODIFY:
740 return nonzero_address_p (XEXP (x, 0));
741
742 case LO_SUM:
743 return nonzero_address_p (XEXP (x, 1));
744
745 default:
746 break;
747 }
748
749 /* If it isn't one of the cases above, it might be zero. */
750 return false;
751 }
752
753 /* Return 1 if X refers to a memory location whose address
754 cannot be compared reliably with constant addresses,
755 or if X refers to a BLKmode memory object.
756 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
757 zero, we are slightly more conservative. */
758
759 bool
760 rtx_addr_varies_p (const_rtx x, bool for_alias)
761 {
762 enum rtx_code code;
763 int i;
764 const char *fmt;
765
766 if (x == 0)
767 return 0;
768
769 code = GET_CODE (x);
770 if (code == MEM)
771 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
772
773 fmt = GET_RTX_FORMAT (code);
774 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
775 if (fmt[i] == 'e')
776 {
777 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
778 return 1;
779 }
780 else if (fmt[i] == 'E')
781 {
782 int j;
783 for (j = 0; j < XVECLEN (x, i); j++)
784 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
785 return 1;
786 }
787 return 0;
788 }
789 \f
790 /* Return the CALL in X if there is one. */
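
/* The CALL may be the pattern itself, the source of a SET, or the
   first element of a PARALLEL, e.g.
     (call (mem:QI (symbol_ref "foo")) (const_int 0))
   or
     (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0))).  */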
791
792 rtx
793 get_call_rtx_from (rtx x)
794 {
795 if (INSN_P (x))
796 x = PATTERN (x);
797 if (GET_CODE (x) == PARALLEL)
798 x = XVECEXP (x, 0, 0);
799 if (GET_CODE (x) == SET)
800 x = SET_SRC (x);
801 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
802 return x;
803 return NULL_RTX;
804 }
805 \f
806 /* Return the value of the integer term in X, if one is apparent;
807 otherwise return 0.
808 Only obvious integer terms are detected.
809 This is used in cse.c with the `related_value' field. */
810
811 HOST_WIDE_INT
812 get_integer_term (const_rtx x)
813 {
814 if (GET_CODE (x) == CONST)
815 x = XEXP (x, 0);
816
817 if (GET_CODE (x) == MINUS
818 && CONST_INT_P (XEXP (x, 1)))
819 return - INTVAL (XEXP (x, 1));
820 if (GET_CODE (x) == PLUS
821 && CONST_INT_P (XEXP (x, 1)))
822 return INTVAL (XEXP (x, 1));
823 return 0;
824 }
825
826 /* If X is a constant, return the value sans apparent integer term;
827 otherwise return 0.
828 Only obvious integer terms are detected. */
829
830 rtx
831 get_related_value (const_rtx x)
832 {
833 if (GET_CODE (x) != CONST)
834 return 0;
835 x = XEXP (x, 0);
836 if (GET_CODE (x) == PLUS
837 && CONST_INT_P (XEXP (x, 1)))
838 return XEXP (x, 0);
839 else if (GET_CODE (x) == MINUS
840 && CONST_INT_P (XEXP (x, 1)))
841 return XEXP (x, 0);
842 return 0;
843 }
844 \f
845 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
846 to somewhere in the same object or object_block as SYMBOL. */
847
848 bool
849 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
850 {
851 tree decl;
852
853 if (GET_CODE (symbol) != SYMBOL_REF)
854 return false;
855
856 if (offset == 0)
857 return true;
858
859 if (offset > 0)
860 {
861 if (CONSTANT_POOL_ADDRESS_P (symbol)
862 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
863 return true;
864
865 decl = SYMBOL_REF_DECL (symbol);
866 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
867 return true;
868 }
869
870 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
871 && SYMBOL_REF_BLOCK (symbol)
872 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
873 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
874 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
875 return true;
876
877 return false;
878 }
879
880 /* Split X into a base and a constant offset, storing them in *BASE_OUT
881 and *OFFSET_OUT respectively. */
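
/* For example, (const (plus (symbol_ref "x") (const_int 4))) splits
   into base (symbol_ref "x") and offset (const_int 4); any other X
   becomes the base, with offset (const_int 0).  */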
882
883 void
884 split_const (rtx x, rtx *base_out, rtx *offset_out)
885 {
886 if (GET_CODE (x) == CONST)
887 {
888 x = XEXP (x, 0);
889 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
890 {
891 *base_out = XEXP (x, 0);
892 *offset_out = XEXP (x, 1);
893 return;
894 }
895 }
896 *base_out = x;
897 *offset_out = const0_rtx;
898 }
899 \f
900 /* Return the number of places FIND appears within X. If COUNT_DEST is
901 zero, we do not count occurrences inside the destination of a SET. */
902
903 int
904 count_occurrences (const_rtx x, const_rtx find, int count_dest)
905 {
906 int i, j;
907 enum rtx_code code;
908 const char *format_ptr;
909 int count;
910
911 if (x == find)
912 return 1;
913
914 code = GET_CODE (x);
915
916 switch (code)
917 {
918 case REG:
919 CASE_CONST_ANY:
920 case SYMBOL_REF:
921 case CODE_LABEL:
922 case PC:
923 case CC0:
924 return 0;
925
926 case EXPR_LIST:
927 count = count_occurrences (XEXP (x, 0), find, count_dest);
928 if (XEXP (x, 1))
929 count += count_occurrences (XEXP (x, 1), find, count_dest);
930 return count;
931
932 case MEM:
933 if (MEM_P (find) && rtx_equal_p (x, find))
934 return 1;
935 break;
936
937 case SET:
938 if (SET_DEST (x) == find && ! count_dest)
939 return count_occurrences (SET_SRC (x), find, count_dest);
940 break;
941
942 default:
943 break;
944 }
945
946 format_ptr = GET_RTX_FORMAT (code);
947 count = 0;
948
949 for (i = 0; i < GET_RTX_LENGTH (code); i++)
950 {
951 switch (*format_ptr++)
952 {
953 case 'e':
954 count += count_occurrences (XEXP (x, i), find, count_dest);
955 break;
956
957 case 'E':
958 for (j = 0; j < XVECLEN (x, i); j++)
959 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
960 break;
961 }
962 }
963 return count;
964 }
965
966 \f
967 /* Return TRUE if OP is a register or subreg of a register that
968 holds an unsigned quantity. Otherwise, return FALSE. */
969
970 bool
971 unsigned_reg_p (rtx op)
972 {
973 if (REG_P (op)
974 && REG_EXPR (op)
975 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
976 return true;
977
978 if (GET_CODE (op) == SUBREG
979 && SUBREG_PROMOTED_SIGN (op))
980 return true;
981
982 return false;
983 }
984
985 \f
986 /* Nonzero if register REG appears somewhere within IN.
987 Also works if REG is not a register; in this case it checks
988 for a subexpression of IN that is Lisp "equal" to REG. */
989
990 int
991 reg_mentioned_p (const_rtx reg, const_rtx in)
992 {
993 const char *fmt;
994 int i;
995 enum rtx_code code;
996
997 if (in == 0)
998 return 0;
999
1000 if (reg == in)
1001 return 1;
1002
1003 if (GET_CODE (in) == LABEL_REF)
1004 return reg == label_ref_label (in);
1005
1006 code = GET_CODE (in);
1007
1008 switch (code)
1009 {
1010 /* Compare registers by number. */
1011 case REG:
1012 return REG_P (reg) && REGNO (in) == REGNO (reg);
1013
1014 /* These codes have no constituent expressions
1015 and are unique. */
1016 case SCRATCH:
1017 case CC0:
1018 case PC:
1019 return 0;
1020
1021 CASE_CONST_ANY:
1022 /* These are kept unique for a given value. */
1023 return 0;
1024
1025 default:
1026 break;
1027 }
1028
1029 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1030 return 1;
1031
1032 fmt = GET_RTX_FORMAT (code);
1033
1034 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1035 {
1036 if (fmt[i] == 'E')
1037 {
1038 int j;
1039 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1040 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1041 return 1;
1042 }
1043 else if (fmt[i] == 'e'
1044 && reg_mentioned_p (reg, XEXP (in, i)))
1045 return 1;
1046 }
1047 return 0;
1048 }
1049 \f
1050 /* Return 1 if there is no CODE_LABEL insn between BEG and END,
1051 exclusive of BEG and END. */
1052
1053 int
1054 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1055 {
1056 rtx_insn *p;
1057 if (beg == end)
1058 return 0;
1059 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1060 if (LABEL_P (p))
1061 return 0;
1062 return 1;
1063 }
1064
1065 /* Nonzero if register REG is used in an insn between
1066 FROM_INSN and TO_INSN (exclusive of those two). */
1067
1068 int
1069 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1070 const rtx_insn *to_insn)
1071 {
1072 rtx_insn *insn;
1073
1074 if (from_insn == to_insn)
1075 return 0;
1076
1077 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1078 if (NONDEBUG_INSN_P (insn)
1079 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1080 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1081 return 1;
1082 return 0;
1083 }
1084 \f
1085 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1086 is entirely replaced by a new value and the only use is as a SET_DEST,
1087 we do not consider it a reference. */
1088
1089 int
1090 reg_referenced_p (const_rtx x, const_rtx body)
1091 {
1092 int i;
1093
1094 switch (GET_CODE (body))
1095 {
1096 case SET:
1097 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1098 return 1;
1099
1100 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1101 of a REG that occupies all of the REG, the insn references X if
1102 it is mentioned in the destination. */
1103 if (GET_CODE (SET_DEST (body)) != CC0
1104 && GET_CODE (SET_DEST (body)) != PC
1105 && !REG_P (SET_DEST (body))
1106 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1107 && REG_P (SUBREG_REG (SET_DEST (body)))
1108 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
1109 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
1110 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
1111 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
1112 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1113 return 1;
1114 return 0;
1115
1116 case ASM_OPERANDS:
1117 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1118 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1119 return 1;
1120 return 0;
1121
1122 case CALL:
1123 case USE:
1124 case IF_THEN_ELSE:
1125 return reg_overlap_mentioned_p (x, body);
1126
1127 case TRAP_IF:
1128 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1129
1130 case PREFETCH:
1131 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1132
1133 case UNSPEC:
1134 case UNSPEC_VOLATILE:
1135 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1136 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1137 return 1;
1138 return 0;
1139
1140 case PARALLEL:
1141 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1142 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1143 return 1;
1144 return 0;
1145
1146 case CLOBBER:
1147 if (MEM_P (XEXP (body, 0)))
1148 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1149 return 1;
1150 return 0;
1151
1152 case COND_EXEC:
1153 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1154 return 1;
1155 return reg_referenced_p (x, COND_EXEC_CODE (body));
1156
1157 default:
1158 return 0;
1159 }
1160 }
1161 \f
1162 /* Nonzero if register REG is set or clobbered in an insn between
1163 FROM_INSN and TO_INSN (exclusive of those two). */
1164
1165 int
1166 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1167 const rtx_insn *to_insn)
1168 {
1169 const rtx_insn *insn;
1170
1171 if (from_insn == to_insn)
1172 return 0;
1173
1174 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1175 if (INSN_P (insn) && reg_set_p (reg, insn))
1176 return 1;
1177 return 0;
1178 }
1179
1180 /* Return true if REG is set or clobbered inside INSN. */
1181
1182 int
1183 reg_set_p (const_rtx reg, const_rtx insn)
1184 {
1185 /* After delay slot handling, call and branch insns might be in a
1186 sequence. Check all the elements there. */
1187 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1188 {
1189 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1190 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1191 return true;
1192
1193 return false;
1194 }
1195
1196 /* We can be passed an insn or part of one. If we are passed an insn,
1197 check if a side-effect of the insn clobbers REG. */
1198 if (INSN_P (insn)
1199 && (FIND_REG_INC_NOTE (insn, reg)
1200 || (CALL_P (insn)
1201 && ((REG_P (reg)
1202 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1203 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1204 GET_MODE (reg), REGNO (reg)))
1205 || MEM_P (reg)
1206 || find_reg_fusage (insn, CLOBBER, reg)))))
1207 return true;
1208
1209 return set_of (reg, insn) != NULL_RTX;
1210 }
1211
1212 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1213 only if none of them are modified between START and END. Return 1 if
1214 X contains a MEM; this routine does use memory aliasing. */
1215
1216 int
1217 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1218 {
1219 const enum rtx_code code = GET_CODE (x);
1220 const char *fmt;
1221 int i, j;
1222 rtx_insn *insn;
1223
1224 if (start == end)
1225 return 0;
1226
1227 switch (code)
1228 {
1229 CASE_CONST_ANY:
1230 case CONST:
1231 case SYMBOL_REF:
1232 case LABEL_REF:
1233 return 0;
1234
1235 case PC:
1236 case CC0:
1237 return 1;
1238
1239 case MEM:
1240 if (modified_between_p (XEXP (x, 0), start, end))
1241 return 1;
1242 if (MEM_READONLY_P (x))
1243 return 0;
1244 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1245 if (memory_modified_in_insn_p (x, insn))
1246 return 1;
1247 return 0;
1248
1249 case REG:
1250 return reg_set_between_p (x, start, end);
1251
1252 default:
1253 break;
1254 }
1255
1256 fmt = GET_RTX_FORMAT (code);
1257 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1258 {
1259 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1260 return 1;
1261
1262 else if (fmt[i] == 'E')
1263 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1264 if (modified_between_p (XVECEXP (x, i, j), start, end))
1265 return 1;
1266 }
1267
1268 return 0;
1269 }
1270
1271 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1272 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1273 does use memory aliasing. */
1274
1275 int
1276 modified_in_p (const_rtx x, const_rtx insn)
1277 {
1278 const enum rtx_code code = GET_CODE (x);
1279 const char *fmt;
1280 int i, j;
1281
1282 switch (code)
1283 {
1284 CASE_CONST_ANY:
1285 case CONST:
1286 case SYMBOL_REF:
1287 case LABEL_REF:
1288 return 0;
1289
1290 case PC:
1291 case CC0:
1292 return 1;
1293
1294 case MEM:
1295 if (modified_in_p (XEXP (x, 0), insn))
1296 return 1;
1297 if (MEM_READONLY_P (x))
1298 return 0;
1299 if (memory_modified_in_insn_p (x, insn))
1300 return 1;
1301 return 0;
1302
1303 case REG:
1304 return reg_set_p (x, insn);
1305
1306 default:
1307 break;
1308 }
1309
1310 fmt = GET_RTX_FORMAT (code);
1311 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1312 {
1313 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1314 return 1;
1315
1316 else if (fmt[i] == 'E')
1317 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1318 if (modified_in_p (XVECEXP (x, i, j), insn))
1319 return 1;
1320 }
1321
1322 return 0;
1323 }
1324 \f
1325 /* Helper function for set_of. */
1326 struct set_of_data
1327 {
1328 const_rtx found;
1329 const_rtx pat;
1330 };
1331
1332 static void
1333 set_of_1 (rtx x, const_rtx pat, void *data1)
1334 {
1335 struct set_of_data *const data = (struct set_of_data *) (data1);
1336 if (rtx_equal_p (x, data->pat)
1337 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1338 data->found = pat;
1339 }
1340
1341 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1342 (either directly or via STRICT_LOW_PART and similar modifiers). */
1343 const_rtx
1344 set_of (const_rtx pat, const_rtx insn)
1345 {
1346 struct set_of_data data;
1347 data.found = NULL_RTX;
1348 data.pat = pat;
1349 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1350 return data.found;
1351 }
1352
1353 /* Add all hard registers in X to *PSET. */
1354 void
1355 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1356 {
1357 subrtx_iterator::array_type array;
1358 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1359 {
1360 const_rtx x = *iter;
1361 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1362 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1363 }
1364 }
1365
1366 /* This function, called through note_stores, collects sets and
1367 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1368 by DATA. */
1369 void
1370 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1371 {
1372 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1373 if (REG_P (x) && HARD_REGISTER_P (x))
1374 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1375 }
1376
1377 /* Examine INSN, and compute the set of hard registers written by it.
1378 Store it in *PSET. Should only be called after reload. */
1379 void
1380 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1381 {
1382 rtx link;
1383
1384 CLEAR_HARD_REG_SET (*pset);
1385 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1386 if (CALL_P (insn))
1387 {
1388 if (implicit)
1389 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1390
1391 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1392 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1393 }
1394 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1395 if (REG_NOTE_KIND (link) == REG_INC)
1396 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1397 }
1398
1399 /* Like record_hard_reg_sets, but called through note_uses. */
1400 void
1401 record_hard_reg_uses (rtx *px, void *data)
1402 {
1403 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1404 }
1405 \f
1406 /* Given an INSN, return a SET expression if this insn has only a single SET.
1407 It may also have CLOBBERs, USEs, or SETs whose output
1408 will not be used, which we ignore. */
1409
1410 rtx
1411 single_set_2 (const rtx_insn *insn, const_rtx pat)
1412 {
1413 rtx set = NULL;
1414 int set_verified = 1;
1415 int i;
1416
1417 if (GET_CODE (pat) == PARALLEL)
1418 {
1419 for (i = 0; i < XVECLEN (pat, 0); i++)
1420 {
1421 rtx sub = XVECEXP (pat, 0, i);
1422 switch (GET_CODE (sub))
1423 {
1424 case USE:
1425 case CLOBBER:
1426 break;
1427
1428 case SET:
1429 /* We can consider insns having multiple sets, where all
1430 but one are dead, as single-set insns. In the common case
1431 only a single set is present in the pattern, so we want
1432 to avoid checking for REG_UNUSED notes unless necessary.
1433
1434 When we reach a set for the first time, we just assume it is
1435 the single set we are looking for; only when more sets are
1436 found in the insn do we check them. */
1437 if (!set_verified)
1438 {
1439 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1440 && !side_effects_p (set))
1441 set = NULL;
1442 else
1443 set_verified = 1;
1444 }
1445 if (!set)
1446 set = sub, set_verified = 0;
1447 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1448 || side_effects_p (sub))
1449 return NULL_RTX;
1450 break;
1451
1452 default:
1453 return NULL_RTX;
1454 }
1455 }
1456 }
1457 return set;
1458 }
1459
1460 /* Given an INSN, return nonzero if it has more than one SET, else return
1461 zero. */
1462
1463 int
1464 multiple_sets (const_rtx insn)
1465 {
1466 int found;
1467 int i;
1468
1469 /* INSN must be an insn. */
1470 if (! INSN_P (insn))
1471 return 0;
1472
1473 /* Only a PARALLEL can have multiple SETs. */
1474 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1475 {
1476 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1477 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1478 {
1479 /* If we have already found a SET, then return now. */
1480 if (found)
1481 return 1;
1482 else
1483 found = 1;
1484 }
1485 }
1486
1487 /* Either zero or one SET. */
1488 return 0;
1489 }
1490 \f
1491 /* Return nonzero if the destination of SET equals the source
1492 and there are no side effects. */
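
/* E.g. (set (reg:SI 1) (reg:SI 1)) is a no-op, as is a VEC_SELECT
   whose selected lanes coincide exactly with the hard registers of
   the destination.  */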
1493
1494 int
1495 set_noop_p (const_rtx set)
1496 {
1497 rtx src = SET_SRC (set);
1498 rtx dst = SET_DEST (set);
1499
1500 if (dst == pc_rtx && src == pc_rtx)
1501 return 1;
1502
1503 if (MEM_P (dst) && MEM_P (src))
1504 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1505
1506 if (GET_CODE (dst) == ZERO_EXTRACT)
1507 return rtx_equal_p (XEXP (dst, 0), src)
1508 && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1509 && !side_effects_p (src);
1510
1511 if (GET_CODE (dst) == STRICT_LOW_PART)
1512 dst = XEXP (dst, 0);
1513
1514 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1515 {
1516 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1517 return 0;
1518 src = SUBREG_REG (src);
1519 dst = SUBREG_REG (dst);
1520 }
1521
1522 /* It is a no-op if the destination is the same hard register range
1523 as the contiguous run of source vector elements being selected. */
1524 if (GET_CODE (src) == VEC_SELECT
1525 && REG_P (XEXP (src, 0)) && REG_P (dst)
1526 && HARD_REGISTER_P (XEXP (src, 0))
1527 && HARD_REGISTER_P (dst))
1528 {
1529 int i;
1530 rtx par = XEXP (src, 1);
1531 rtx src0 = XEXP (src, 0);
1532 int c0 = INTVAL (XVECEXP (par, 0, 0));
1533 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1534
1535 for (i = 1; i < XVECLEN (par, 0); i++)
1536 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1537 return 0;
1538 return
1539 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1540 offset, GET_MODE (dst)) == (int) REGNO (dst);
1541 }
1542
1543 return (REG_P (src) && REG_P (dst)
1544 && REGNO (src) == REGNO (dst));
1545 }
1546 \f
1547 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1548 value to itself. */
1549
1550 int
1551 noop_move_p (const rtx_insn *insn)
1552 {
1553 rtx pat = PATTERN (insn);
1554
1555 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1556 return 1;
1557
1558 /* Insns carrying these notes are useful later on. */
1559 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1560 return 0;
1561
1562 /* Check the code to be executed for COND_EXEC. */
1563 if (GET_CODE (pat) == COND_EXEC)
1564 pat = COND_EXEC_CODE (pat);
1565
1566 if (GET_CODE (pat) == SET && set_noop_p (pat))
1567 return 1;
1568
1569 if (GET_CODE (pat) == PARALLEL)
1570 {
1571 int i;
1572 /* If nothing but SETs of registers to themselves,
1573 this insn can also be deleted. */
1574 for (i = 0; i < XVECLEN (pat, 0); i++)
1575 {
1576 rtx tem = XVECEXP (pat, 0, i);
1577
1578 if (GET_CODE (tem) == USE
1579 || GET_CODE (tem) == CLOBBER)
1580 continue;
1581
1582 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1583 return 0;
1584 }
1585
1586 return 1;
1587 }
1588 return 0;
1589 }
1590 \f
1591
1592 /* Return nonzero if a register in the range [REGNO, ENDREGNO)
1593 appears either explicitly or implicitly in X
1594 other than being stored into.
1595
1596 References contained within the substructure at LOC do not count.
1597 LOC may be zero, meaning don't ignore anything. */
1598
1599 bool
1600 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1601 rtx *loc)
1602 {
1603 int i;
1604 unsigned int x_regno;
1605 RTX_CODE code;
1606 const char *fmt;
1607
1608 repeat:
1609 /* The contents of a REG_NONNEG note are always zero, so we must come here
1610 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1611 if (x == 0)
1612 return false;
1613
1614 code = GET_CODE (x);
1615
1616 switch (code)
1617 {
1618 case REG:
1619 x_regno = REGNO (x);
1620
1621 /* If we are modifying the stack, frame, or argument pointer, it will
1622 clobber a virtual register. In fact, we could be more precise,
1623 but it isn't worth it. */
1624 if ((x_regno == STACK_POINTER_REGNUM
1625 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1626 && x_regno == ARG_POINTER_REGNUM)
1627 || x_regno == FRAME_POINTER_REGNUM)
1628 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1629 return true;
1630
1631 return endregno > x_regno && regno < END_REGNO (x);
1632
1633 case SUBREG:
1634 /* If this is a SUBREG of a hard reg, we can see exactly which
1635 registers are being modified. Otherwise, handle normally. */
1636 if (REG_P (SUBREG_REG (x))
1637 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1638 {
1639 unsigned int inner_regno = subreg_regno (x);
1640 unsigned int inner_endregno
1641 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1642 ? subreg_nregs (x) : 1);
1643
1644 return endregno > inner_regno && regno < inner_endregno;
1645 }
1646 break;
1647
1648 case CLOBBER:
1649 case SET:
1650 if (&SET_DEST (x) != loc
1651 /* Note setting a SUBREG counts as referring to the REG it is in for
1652 a pseudo but not for hard registers since we can
1653 treat each word individually. */
1654 && ((GET_CODE (SET_DEST (x)) == SUBREG
1655 && loc != &SUBREG_REG (SET_DEST (x))
1656 && REG_P (SUBREG_REG (SET_DEST (x)))
1657 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1658 && refers_to_regno_p (regno, endregno,
1659 SUBREG_REG (SET_DEST (x)), loc))
1660 || (!REG_P (SET_DEST (x))
1661 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1662 return true;
1663
1664 if (code == CLOBBER || loc == &SET_SRC (x))
1665 return false;
1666 x = SET_SRC (x);
1667 goto repeat;
1668
1669 default:
1670 break;
1671 }
1672
1673 /* X does not match, so try its subexpressions. */
1674
1675 fmt = GET_RTX_FORMAT (code);
1676 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1677 {
1678 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1679 {
1680 if (i == 0)
1681 {
1682 x = XEXP (x, 0);
1683 goto repeat;
1684 }
1685 else
1686 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1687 return true;
1688 }
1689 else if (fmt[i] == 'E')
1690 {
1691 int j;
1692 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1693 if (loc != &XVECEXP (x, i, j)
1694 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1695 return true;
1696 }
1697 }
1698 return false;
1699 }
1700
1701 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1702 we check if any register number in X conflicts with the relevant register
1703 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1704 contains a MEM (we don't bother checking for memory addresses that can't
1705 conflict because we expect this to be a rare case). */
1706
1707 int
1708 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1709 {
1710 unsigned int regno, endregno;
1711
1712 /* If either argument is a constant, then modifying X cannot
1713 affect IN. We check IN here; the check of X is profitably
1714 combined with the switch statement below. */
1715 if (CONSTANT_P (in))
1716 return 0;
1717
1718 recurse:
1719 switch (GET_CODE (x))
1720 {
1721 case STRICT_LOW_PART:
1722 case ZERO_EXTRACT:
1723 case SIGN_EXTRACT:
1724 /* Overly conservative. */
1725 x = XEXP (x, 0);
1726 goto recurse;
1727
1728 case SUBREG:
1729 regno = REGNO (SUBREG_REG (x));
1730 if (regno < FIRST_PSEUDO_REGISTER)
1731 regno = subreg_regno (x);
1732 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1733 ? subreg_nregs (x) : 1);
1734 goto do_reg;
1735
1736 case REG:
1737 regno = REGNO (x);
1738 endregno = END_REGNO (x);
1739 do_reg:
1740 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1741
1742 case MEM:
1743 {
1744 const char *fmt;
1745 int i;
1746
1747 if (MEM_P (in))
1748 return 1;
1749
1750 fmt = GET_RTX_FORMAT (GET_CODE (in));
1751 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1752 if (fmt[i] == 'e')
1753 {
1754 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1755 return 1;
1756 }
1757 else if (fmt[i] == 'E')
1758 {
1759 int j;
1760 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1761 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1762 return 1;
1763 }
1764
1765 return 0;
1766 }
1767
1768 case SCRATCH:
1769 case PC:
1770 case CC0:
1771 return reg_mentioned_p (x, in);
1772
1773 case PARALLEL:
1774 {
1775 int i;
1776
1777 /* If any register in here refers to it we return true. */
1778 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1779 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1780 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1781 return 1;
1782 return 0;
1783 }
1784
1785 default:
1786 gcc_assert (CONSTANT_P (x));
1787 return 0;
1788 }
1789 }
1790 \f
1791 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1792 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1793 ignored by note_stores, but passed to FUN.
1794
1795 FUN receives three arguments:
1796 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1797 2. the SET or CLOBBER rtx that does the store,
1798 3. the pointer DATA provided to note_stores.
1799
1800 If the item being stored in or clobbered is a SUBREG of a hard register,
1801 the SUBREG will be passed. */
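
/* A typical use, as in find_all_hard_reg_sets below:

       note_stores (PATTERN (insn), record_hard_reg_sets, &set);

   where record_hard_reg_sets accumulates each stored hard register
   into the HARD_REG_SET that DATA points to.  */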
1802
1803 void
1804 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1805 {
1806 int i;
1807
1808 if (GET_CODE (x) == COND_EXEC)
1809 x = COND_EXEC_CODE (x);
1810
1811 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1812 {
1813 rtx dest = SET_DEST (x);
1814
1815 while ((GET_CODE (dest) == SUBREG
1816 && (!REG_P (SUBREG_REG (dest))
1817 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1818 || GET_CODE (dest) == ZERO_EXTRACT
1819 || GET_CODE (dest) == STRICT_LOW_PART)
1820 dest = XEXP (dest, 0);
1821
1822 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1823 each of whose first operand is a register. */
1824 if (GET_CODE (dest) == PARALLEL)
1825 {
1826 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1827 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1828 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1829 }
1830 else
1831 (*fun) (dest, x, data);
1832 }
1833
1834 else if (GET_CODE (x) == PARALLEL)
1835 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1836 note_stores (XVECEXP (x, 0, i), fun, data);
1837 }
1838 \f
1839 /* Like note_stores, but call FUN for each expression that is being
1840 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1841 FUN for each expression, not any interior subexpressions. FUN receives a
1842 pointer to the expression and the DATA passed to this function.
1843
1844 Note that this is not quite the same test as that done in reg_referenced_p
1845 since that considers something as being referenced if it is being
1846 partially set, while we do not. */
1847
1848 void
1849 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1850 {
1851 rtx body = *pbody;
1852 int i;
1853
1854 switch (GET_CODE (body))
1855 {
1856 case COND_EXEC:
1857 (*fun) (&COND_EXEC_TEST (body), data);
1858 note_uses (&COND_EXEC_CODE (body), fun, data);
1859 return;
1860
1861 case PARALLEL:
1862 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1863 note_uses (&XVECEXP (body, 0, i), fun, data);
1864 return;
1865
1866 case SEQUENCE:
1867 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1868 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1869 return;
1870
1871 case USE:
1872 (*fun) (&XEXP (body, 0), data);
1873 return;
1874
1875 case ASM_OPERANDS:
1876 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1877 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1878 return;
1879
1880 case TRAP_IF:
1881 (*fun) (&TRAP_CONDITION (body), data);
1882 return;
1883
1884 case PREFETCH:
1885 (*fun) (&XEXP (body, 0), data);
1886 return;
1887
1888 case UNSPEC:
1889 case UNSPEC_VOLATILE:
1890 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1891 (*fun) (&XVECEXP (body, 0, i), data);
1892 return;
1893
1894 case CLOBBER:
1895 if (MEM_P (XEXP (body, 0)))
1896 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1897 return;
1898
1899 case SET:
1900 {
1901 rtx dest = SET_DEST (body);
1902
1903 /* For a SET we call FUN on everything in the source, on the address
1904 of a MEM destination, and on the operands of a ZERO_EXTRACT. */
1905 (*fun) (&SET_SRC (body), data);
1906
1907 if (GET_CODE (dest) == ZERO_EXTRACT)
1908 {
1909 (*fun) (&XEXP (dest, 1), data);
1910 (*fun) (&XEXP (dest, 2), data);
1911 }
1912
1913 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1914 dest = XEXP (dest, 0);
1915
1916 if (MEM_P (dest))
1917 (*fun) (&XEXP (dest, 0), data);
1918 }
1919 return;
1920
1921 default:
1922 /* All the other possibilities never store. */
1923 (*fun) (pbody, data);
1924 return;
1925 }
1926 }
1927 \f
1928 /* Return nonzero if X's old contents don't survive after INSN.
1929 This will be true if X is (cc0) or if X is a register and
1930 either X dies in INSN or INSN entirely sets X.
1931
1932 "Entirely set" means set directly and not through a SUBREG or
1933 ZERO_EXTRACT, so no trace of the old contents remains.
1934 Likewise, REG_INC does not count.
1935
1936 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1937 but for this use that makes no difference, since regs don't overlap
1938 during their lifetimes. Therefore, this function may be used
1939 at any time after deaths have been computed.
1940
1941 If REG is a hard reg that occupies multiple machine registers, this
1942 function will only return 1 if each of those registers will be replaced
1943 by INSN. */
1944
1945 int
1946 dead_or_set_p (const_rtx insn, const_rtx x)
1947 {
1948 unsigned int regno, end_regno;
1949 unsigned int i;
1950
1951 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1952 if (GET_CODE (x) == CC0)
1953 return 1;
1954
1955 gcc_assert (REG_P (x));
1956
1957 regno = REGNO (x);
1958 end_regno = END_REGNO (x);
1959 for (i = regno; i < end_regno; i++)
1960 if (! dead_or_set_regno_p (insn, i))
1961 return 0;
1962
1963 return 1;
1964 }
1965
1966 /* Return TRUE iff DEST is a register, or a subreg of a register that
1967 doesn't change the number of words of the inner register, and some
1968 part of the register is TEST_REGNO. */
1969
1970 static bool
1971 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1972 {
1973 unsigned int regno, endregno;
1974
1975 if (GET_CODE (dest) == SUBREG
1976 && (((GET_MODE_SIZE (GET_MODE (dest))
1977 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1978 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1979 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1980 dest = SUBREG_REG (dest);
1981
1982 if (!REG_P (dest))
1983 return false;
1984
1985 regno = REGNO (dest);
1986 endregno = END_REGNO (dest);
1987 return (test_regno >= regno && test_regno < endregno);
1988 }
1989
1990 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1991 any member matches the covers_regno_no_parallel_p criteria. */
1992
1993 static bool
1994 covers_regno_p (const_rtx dest, unsigned int test_regno)
1995 {
1996 if (GET_CODE (dest) == PARALLEL)
1997 {
1998 /* Some targets place small structures in registers for return
1999 values of functions, and those registers are wrapped in
2000 PARALLELs that we may see as the destination of a SET. */
2001 int i;
2002
2003 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2004 {
2005 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2006 if (inner != NULL_RTX
2007 && covers_regno_no_parallel_p (inner, test_regno))
2008 return true;
2009 }
2010
2011 return false;
2012 }
2013 else
2014 return covers_regno_no_parallel_p (dest, test_regno);
2015 }
2016
2017 /* Utility function for dead_or_set_p to check an individual register. */
2018
2019 int
2020 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
2021 {
2022 const_rtx pattern;
2023
2024 /* See if there is a death note for something that includes TEST_REGNO. */
2025 if (find_regno_note (insn, REG_DEAD, test_regno))
2026 return 1;
2027
2028 if (CALL_P (insn)
2029 && find_regno_fusage (insn, CLOBBER, test_regno))
2030 return 1;
2031
2032 pattern = PATTERN (insn);
2033
2034 /* If a COND_EXEC is not executed, the value survives. */
2035 if (GET_CODE (pattern) == COND_EXEC)
2036 return 0;
2037
2038 if (GET_CODE (pattern) == SET)
2039 return covers_regno_p (SET_DEST (pattern), test_regno);
2040 else if (GET_CODE (pattern) == PARALLEL)
2041 {
2042 int i;
2043
2044 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2045 {
2046 rtx body = XVECEXP (pattern, 0, i);
2047
2048 if (GET_CODE (body) == COND_EXEC)
2049 body = COND_EXEC_CODE (body);
2050
2051 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2052 && covers_regno_p (SET_DEST (body), test_regno))
2053 return 1;
2054 }
2055 }
2056
2057 return 0;
2058 }
2059
2060 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2061 If DATUM is nonzero, look for one whose datum is DATUM. */
2062
2063 rtx
2064 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2065 {
2066 rtx link;
2067
2068 gcc_checking_assert (insn);
2069
2070 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2071 if (! INSN_P (insn))
2072 return 0;
2073 if (datum == 0)
2074 {
2075 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2076 if (REG_NOTE_KIND (link) == kind)
2077 return link;
2078 return 0;
2079 }
2080
2081 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2082 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2083 return link;
2084 return 0;
2085 }
2086
2087 /* Return the reg-note of kind KIND in insn INSN which applies to register
2088 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2089 the register mentioned in this note need not have number REGNO if REGNO is
2090 a hard register; it is enough that the note's register overlaps REGNO. */
2091
2092 rtx
2093 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2094 {
2095 rtx link;
2096
2097 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2098 if (! INSN_P (insn))
2099 return 0;
2100
2101 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2102 if (REG_NOTE_KIND (link) == kind
2103 /* Verify that it is a register, so that scratch and MEM won't cause a
2104 problem here. */
2105 && REG_P (XEXP (link, 0))
2106 && REGNO (XEXP (link, 0)) <= regno
2107 && END_REGNO (XEXP (link, 0)) > regno)
2108 return link;
2109 return 0;
2110 }
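
/* Illustrative example, not from the original sources: on a target where
   (reg:DI 0) occupies hard registers 0 and 1, a REG_DEAD note whose datum
   is (reg:DI 0) is found by

     find_regno_note (insn, REG_DEAD, 1);

   because the note's register overlaps regno 1 even though its REGNO is 0.
   The insn and register layout are hypothetical.  */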
2111
2112 /* Return a REG_EQUIV or REG_EQUAL note if INSN has only a single set and
2113 has such a note. */
2114
2115 rtx
2116 find_reg_equal_equiv_note (const_rtx insn)
2117 {
2118 rtx link;
2119
2120 if (!INSN_P (insn))
2121 return 0;
2122
2123 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2124 if (REG_NOTE_KIND (link) == REG_EQUAL
2125 || REG_NOTE_KIND (link) == REG_EQUIV)
2126 {
2127 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2128 insns that have multiple sets. Checking single_set to
2129 make sure of this is not the proper check, as explained
2130 in the comment in set_unique_reg_note.
2131
2132 This should be changed into an assert. */
2133 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2134 return 0;
2135 return link;
2136 }
2137 return NULL;
2138 }
2139
2140 /* Check whether INSN is a single_set whose source is known to be
2141 equivalent to a constant. Return that constant if so, otherwise
2142 return null. */
2143
2144 rtx
2145 find_constant_src (const rtx_insn *insn)
2146 {
2147 rtx note, set, x;
2148
2149 set = single_set (insn);
2150 if (set)
2151 {
2152 x = avoid_constant_pool_reference (SET_SRC (set));
2153 if (CONSTANT_P (x))
2154 return x;
2155 }
2156
2157 note = find_reg_equal_equiv_note (insn);
2158 if (note && CONSTANT_P (XEXP (note, 0)))
2159 return XEXP (note, 0);
2160
2161 return NULL_RTX;
2162 }
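
/* Illustrative sketch, not part of the original sources: a caller that
   wants to propagate a constant produced by INSN might do

     rtx cst = find_constant_src (insn);
     if (cst != NULL_RTX)
       ... substitute CST for uses of the set destination ...

   where INSN is a hypothetical rtx_insn * of the caller.  */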
2163
2164 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2165 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2166
2167 int
2168 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2169 {
2170 /* If it's not a CALL_INSN, it can't possibly have a
2171 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2172 if (!CALL_P (insn))
2173 return 0;
2174
2175 gcc_assert (datum);
2176
2177 if (!REG_P (datum))
2178 {
2179 rtx link;
2180
2181 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2182 link;
2183 link = XEXP (link, 1))
2184 if (GET_CODE (XEXP (link, 0)) == code
2185 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2186 return 1;
2187 }
2188 else
2189 {
2190 unsigned int regno = REGNO (datum);
2191
2192 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2193 to pseudo registers, so don't bother checking. */
2194
2195 if (regno < FIRST_PSEUDO_REGISTER)
2196 {
2197 unsigned int end_regno = END_REGNO (datum);
2198 unsigned int i;
2199
2200 for (i = regno; i < end_regno; i++)
2201 if (find_regno_fusage (insn, code, i))
2202 return 1;
2203 }
2204 }
2205
2206 return 0;
2207 }
2208
2209 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2210 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2211
2212 int
2213 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2214 {
2215 rtx link;
2216
2217 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2218 to pseudo registers, so don't bother checking. */
2219
2220 if (regno >= FIRST_PSEUDO_REGISTER
2221 || !CALL_P (insn))
2222 return 0;
2223
2224 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2225 {
2226 rtx op, reg;
2227
2228 if (GET_CODE (op = XEXP (link, 0)) == code
2229 && REG_P (reg = XEXP (op, 0))
2230 && REGNO (reg) <= regno
2231 && END_REGNO (reg) > regno)
2232 return 1;
2233 }
2234
2235 return 0;
2236 }
2237
2238 \f
2239 /* Return true if KIND is an integer REG_NOTE. */
2240
2241 static bool
2242 int_reg_note_p (enum reg_note kind)
2243 {
2244 return kind == REG_BR_PROB;
2245 }
2246
2247 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2248 stored as the pointer to the next register note. */
2249
2250 rtx
2251 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2252 {
2253 rtx note;
2254
2255 gcc_checking_assert (!int_reg_note_p (kind));
2256 switch (kind)
2257 {
2258 case REG_CC_SETTER:
2259 case REG_CC_USER:
2260 case REG_LABEL_TARGET:
2261 case REG_LABEL_OPERAND:
2262 case REG_TM:
2263 /* These types of register notes use an INSN_LIST rather than an
2264 EXPR_LIST, so that copying is done right and dumps look
2265 better. */
2266 note = alloc_INSN_LIST (datum, list);
2267 PUT_REG_NOTE_KIND (note, kind);
2268 break;
2269
2270 default:
2271 note = alloc_EXPR_LIST (kind, datum, list);
2272 break;
2273 }
2274
2275 return note;
2276 }
2277
2278 /* Add register note with kind KIND and datum DATUM to INSN. */
2279
2280 void
2281 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2282 {
2283 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2284 }
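
/* Illustrative sketch, not part of the original sources: recording that
   the value set by INSN is known to equal a constant might look like

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   The insn and constant are hypothetical; REG_EQUAL uses an EXPR_LIST,
   so alloc_reg_note takes the default branch above.  */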
2285
2286 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2287
2288 void
2289 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2290 {
2291 gcc_checking_assert (int_reg_note_p (kind));
2292 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2293 datum, REG_NOTES (insn));
2294 }
2295
2296 /* Add a register note like NOTE to INSN. */
2297
2298 void
2299 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2300 {
2301 if (GET_CODE (note) == INT_LIST)
2302 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2303 else
2304 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2305 }
2306
2307 /* Duplicate NOTE and return the copy. */
2308 rtx
2309 duplicate_reg_note (rtx note)
2310 {
2311 reg_note kind = REG_NOTE_KIND (note);
2312
2313 if (GET_CODE (note) == INT_LIST)
2314 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2315 else if (GET_CODE (note) == EXPR_LIST)
2316 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2317 else
2318 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2319 }
2320
2321 /* Remove register note NOTE from the REG_NOTES of INSN. */
2322
2323 void
2324 remove_note (rtx_insn *insn, const_rtx note)
2325 {
2326 rtx link;
2327
2328 if (note == NULL_RTX)
2329 return;
2330
2331 if (REG_NOTES (insn) == note)
2332 REG_NOTES (insn) = XEXP (note, 1);
2333 else
2334 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2335 if (XEXP (link, 1) == note)
2336 {
2337 XEXP (link, 1) = XEXP (note, 1);
2338 break;
2339 }
2340
2341 switch (REG_NOTE_KIND (note))
2342 {
2343 case REG_EQUAL:
2344 case REG_EQUIV:
2345 df_notes_rescan (insn);
2346 break;
2347 default:
2348 break;
2349 }
2350 }
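
/* Illustrative sketch, not part of the original sources: the usual way to
   drop a note of a given kind is to look it up and then splice it out:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       remove_note (insn, note);

   where INSN is a hypothetical rtx_insn *.  */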
2351
2352 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2353
2354 void
2355 remove_reg_equal_equiv_notes (rtx_insn *insn)
2356 {
2357 rtx *loc;
2358
2359 loc = &REG_NOTES (insn);
2360 while (*loc)
2361 {
2362 enum reg_note kind = REG_NOTE_KIND (*loc);
2363 if (kind == REG_EQUAL || kind == REG_EQUIV)
2364 *loc = XEXP (*loc, 1);
2365 else
2366 loc = &XEXP (*loc, 1);
2367 }
2368 }
2369
2370 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2371
2372 void
2373 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2374 {
2375 df_ref eq_use;
2376
2377 if (!df)
2378 return;
2379
2380 /* This loop is a little tricky. We cannot just go down the chain because
2381 it is being modified by some actions in the loop. So we just iterate
2382 over the head. We plan to drain the list anyway. */
2383 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2384 {
2385 rtx_insn *insn = DF_REF_INSN (eq_use);
2386 rtx note = find_reg_equal_equiv_note (insn);
2387
2388 /* This assert is generally triggered when someone deletes a REG_EQUAL
2389 or REG_EQUIV note by hacking the list manually rather than calling
2390 remove_note. */
2391 gcc_assert (note);
2392
2393 remove_note (insn, note);
2394 }
2395 }
2396
2397 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2398 return true if it is found. A simple equality test is used to determine
2399 if NODE matches. */
2400
2401 bool
2402 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2403 {
2404 const_rtx x;
2405
2406 for (x = listp; x; x = XEXP (x, 1))
2407 if (node == XEXP (x, 0))
2408 return true;
2409
2410 return false;
2411 }
2412
2413 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2414 remove that entry from the list if it is found.
2415
2416 A simple equality test is used to determine if NODE matches. */
2417
2418 void
2419 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2420 {
2421 rtx_expr_list *temp = *listp;
2422 rtx_expr_list *prev = NULL;
2423
2424 while (temp)
2425 {
2426 if (node == temp->element ())
2427 {
2428 /* Splice the node out of the list. */
2429 if (prev)
2430 XEXP (prev, 1) = temp->next ();
2431 else
2432 *listp = temp->next ();
2433
2434 return;
2435 }
2436
2437 prev = temp;
2438 temp = temp->next ();
2439 }
2440 }
2441
2442 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2443 remove that entry from the list if it is found.
2444
2445 A simple equality test is used to determine if NODE matches. */
2446
2447 void
2448 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2449 {
2450 rtx_insn_list *temp = *listp;
2451 rtx_insn_list *prev = NULL;
2452
2453 while (temp)
2454 {
2455 if (node == temp->insn ())
2456 {
2457 /* Splice the node out of the list. */
2458 if (prev)
2459 XEXP (prev, 1) = temp->next ();
2460 else
2461 *listp = temp->next ();
2462
2463 return;
2464 }
2465
2466 prev = temp;
2467 temp = temp->next ();
2468 }
2469 }
2470 \f
2471 /* Nonzero if X contains any volatile instructions. These are instructions
2472 which may cause unpredictable machine state, and thus no
2473 instructions or register uses should be moved or combined across them.
2474 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2475
2476 int
2477 volatile_insn_p (const_rtx x)
2478 {
2479 const RTX_CODE code = GET_CODE (x);
2480 switch (code)
2481 {
2482 case LABEL_REF:
2483 case SYMBOL_REF:
2484 case CONST:
2485 CASE_CONST_ANY:
2486 case CC0:
2487 case PC:
2488 case REG:
2489 case SCRATCH:
2490 case CLOBBER:
2491 case ADDR_VEC:
2492 case ADDR_DIFF_VEC:
2493 case CALL:
2494 case MEM:
2495 return 0;
2496
2497 case UNSPEC_VOLATILE:
2498 return 1;
2499
2500 case ASM_INPUT:
2501 case ASM_OPERANDS:
2502 if (MEM_VOLATILE_P (x))
2503 return 1;
2504
2505 default:
2506 break;
2507 }
2508
2509 /* Recursively scan the operands of this expression. */
2510
2511 {
2512 const char *const fmt = GET_RTX_FORMAT (code);
2513 int i;
2514
2515 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2516 {
2517 if (fmt[i] == 'e')
2518 {
2519 if (volatile_insn_p (XEXP (x, i)))
2520 return 1;
2521 }
2522 else if (fmt[i] == 'E')
2523 {
2524 int j;
2525 for (j = 0; j < XVECLEN (x, i); j++)
2526 if (volatile_insn_p (XVECEXP (x, i, j)))
2527 return 1;
2528 }
2529 }
2530 }
2531 return 0;
2532 }
2533
2534 /* Nonzero if X contains any volatile memory references,
2535 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2536
2537 int
2538 volatile_refs_p (const_rtx x)
2539 {
2540 const RTX_CODE code = GET_CODE (x);
2541 switch (code)
2542 {
2543 case LABEL_REF:
2544 case SYMBOL_REF:
2545 case CONST:
2546 CASE_CONST_ANY:
2547 case CC0:
2548 case PC:
2549 case REG:
2550 case SCRATCH:
2551 case CLOBBER:
2552 case ADDR_VEC:
2553 case ADDR_DIFF_VEC:
2554 return 0;
2555
2556 case UNSPEC_VOLATILE:
2557 return 1;
2558
2559 case MEM:
2560 case ASM_INPUT:
2561 case ASM_OPERANDS:
2562 if (MEM_VOLATILE_P (x))
2563 return 1;
2564
2565 default:
2566 break;
2567 }
2568
2569 /* Recursively scan the operands of this expression. */
2570
2571 {
2572 const char *const fmt = GET_RTX_FORMAT (code);
2573 int i;
2574
2575 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2576 {
2577 if (fmt[i] == 'e')
2578 {
2579 if (volatile_refs_p (XEXP (x, i)))
2580 return 1;
2581 }
2582 else if (fmt[i] == 'E')
2583 {
2584 int j;
2585 for (j = 0; j < XVECLEN (x, i); j++)
2586 if (volatile_refs_p (XVECEXP (x, i, j)))
2587 return 1;
2588 }
2589 }
2590 }
2591 return 0;
2592 }
2593
2594 /* Similar to above, except that it also rejects register pre- and post-
2595 increment/decrement/modify, calls and non-VOID-mode CLOBBERs. */
2596
2597 int
2598 side_effects_p (const_rtx x)
2599 {
2600 const RTX_CODE code = GET_CODE (x);
2601 switch (code)
2602 {
2603 case LABEL_REF:
2604 case SYMBOL_REF:
2605 case CONST:
2606 CASE_CONST_ANY:
2607 case CC0:
2608 case PC:
2609 case REG:
2610 case SCRATCH:
2611 case ADDR_VEC:
2612 case ADDR_DIFF_VEC:
2613 case VAR_LOCATION:
2614 return 0;
2615
2616 case CLOBBER:
2617 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2618 when some combination can't be done. If we see one, don't think
2619 that we can simplify the expression. */
2620 return (GET_MODE (x) != VOIDmode);
2621
2622 case PRE_INC:
2623 case PRE_DEC:
2624 case POST_INC:
2625 case POST_DEC:
2626 case PRE_MODIFY:
2627 case POST_MODIFY:
2628 case CALL:
2629 case UNSPEC_VOLATILE:
2630 return 1;
2631
2632 case MEM:
2633 case ASM_INPUT:
2634 case ASM_OPERANDS:
2635 if (MEM_VOLATILE_P (x))
2636 return 1;
2637
2638 default:
2639 break;
2640 }
2641
2642 /* Recursively scan the operands of this expression. */
2643
2644 {
2645 const char *fmt = GET_RTX_FORMAT (code);
2646 int i;
2647
2648 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2649 {
2650 if (fmt[i] == 'e')
2651 {
2652 if (side_effects_p (XEXP (x, i)))
2653 return 1;
2654 }
2655 else if (fmt[i] == 'E')
2656 {
2657 int j;
2658 for (j = 0; j < XVECLEN (x, i); j++)
2659 if (side_effects_p (XVECEXP (x, i, j)))
2660 return 1;
2661 }
2662 }
2663 }
2664 return 0;
2665 }
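
/* Illustrative examples, not from the original sources: side_effects_p
   returns 1 for a volatile memory reference such as (mem/v:SI (reg:SI 100))
   and for an auto-increment address such as (post_inc:SI (reg:SI 101)),
   but 0 for a plain (plus:SI (reg:SI 100) (const_int 4)).  The register
   numbers are hypothetical.  */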
2666 \f
2667 /* Return nonzero if evaluating rtx X might cause a trap.
2668 FLAGS controls how to consider MEMs. A nonzero value means the context
2669 of the access may have changed from the original, such that the
2670 address may have become invalid. */
2671
2672 int
2673 may_trap_p_1 (const_rtx x, unsigned flags)
2674 {
2675 int i;
2676 enum rtx_code code;
2677 const char *fmt;
2678
2679 /* We make no distinction currently, but this function is part of
2680 the internal target-hooks ABI so we keep the parameter as
2681 "unsigned flags". */
2682 bool code_changed = flags != 0;
2683
2684 if (x == 0)
2685 return 0;
2686 code = GET_CODE (x);
2687 switch (code)
2688 {
2689 /* Handle these cases quickly. */
2690 CASE_CONST_ANY:
2691 case SYMBOL_REF:
2692 case LABEL_REF:
2693 case CONST:
2694 case PC:
2695 case CC0:
2696 case REG:
2697 case SCRATCH:
2698 return 0;
2699
2700 case UNSPEC:
2701 return targetm.unspec_may_trap_p (x, flags);
2702
2703 case UNSPEC_VOLATILE:
2704 case ASM_INPUT:
2705 case TRAP_IF:
2706 return 1;
2707
2708 case ASM_OPERANDS:
2709 return MEM_VOLATILE_P (x);
2710
2711 /* Memory ref can trap unless it's a static var or a stack slot. */
2712 case MEM:
2713 /* Recognize specific pattern of stack checking probes. */
2714 if (flag_stack_check
2715 && MEM_VOLATILE_P (x)
2716 && XEXP (x, 0) == stack_pointer_rtx)
2717 return 1;
2718 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2719 reference; moving it out of context such as when moving code
2720 when optimizing, might cause its address to become invalid. */
2721 code_changed
2722 || !MEM_NOTRAP_P (x))
2723 {
2724 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2725 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2726 GET_MODE (x), code_changed);
2727 }
2728
2729 return 0;
2730
2731 /* Division by a non-constant might trap. */
2732 case DIV:
2733 case MOD:
2734 case UDIV:
2735 case UMOD:
2736 if (HONOR_SNANS (x))
2737 return 1;
2738 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2739 return flag_trapping_math;
2740 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2741 return 1;
2742 break;
2743
2744 case EXPR_LIST:
2745 /* An EXPR_LIST is used to represent a function call. This
2746 certainly may trap. */
2747 return 1;
2748
2749 case GE:
2750 case GT:
2751 case LE:
2752 case LT:
2753 case LTGT:
2754 case COMPARE:
2755 /* Some floating point comparisons may trap. */
2756 if (!flag_trapping_math)
2757 break;
2758 /* ??? There is no machine independent way to check for tests that trap
2759 when COMPARE is used, though many targets do make this distinction.
2760 For instance, sparc uses CCFPE for compares which generate exceptions
2761 and CCFP for compares which do not generate exceptions. */
2762 if (HONOR_NANS (x))
2763 return 1;
2764 /* But often the compare has some CC mode, so check operand
2765 modes as well. */
2766 if (HONOR_NANS (XEXP (x, 0))
2767 || HONOR_NANS (XEXP (x, 1)))
2768 return 1;
2769 break;
2770
2771 case EQ:
2772 case NE:
2773 if (HONOR_SNANS (x))
2774 return 1;
2775 /* Often comparison is CC mode, so check operand modes. */
2776 if (HONOR_SNANS (XEXP (x, 0))
2777 || HONOR_SNANS (XEXP (x, 1)))
2778 return 1;
2779 break;
2780
2781 case FIX:
2782 /* Conversion of floating point might trap. */
2783 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2784 return 1;
2785 break;
2786
2787 case NEG:
2788 case ABS:
2789 case SUBREG:
2790 /* These operations don't trap even with floating point. */
2791 break;
2792
2793 default:
2794 /* Any floating arithmetic may trap. */
2795 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2796 return 1;
2797 }
2798
2799 fmt = GET_RTX_FORMAT (code);
2800 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2801 {
2802 if (fmt[i] == 'e')
2803 {
2804 if (may_trap_p_1 (XEXP (x, i), flags))
2805 return 1;
2806 }
2807 else if (fmt[i] == 'E')
2808 {
2809 int j;
2810 for (j = 0; j < XVECLEN (x, i); j++)
2811 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2812 return 1;
2813 }
2814 }
2815 return 0;
2816 }
2817
2818 /* Return nonzero if evaluating rtx X might cause a trap. */
2819
2820 int
2821 may_trap_p (const_rtx x)
2822 {
2823 return may_trap_p_1 (x, 0);
2824 }
2825
2826 /* Same as above, but additionally return nonzero if evaluating rtx X might
2827 cause a fault. We define a fault for the purpose of this function as an
2828 erroneous execution condition that cannot be encountered during the normal
2829 execution of a valid program; the typical example is an unaligned memory
2830 access on a strict alignment machine. The compiler guarantees that it
2831 doesn't generate code that will fault from a valid program, but this
2832 guarantee doesn't mean anything for individual instructions. Consider
2833 the following example:
2834
2835 struct S { int d; union { char *cp; int *ip; }; };
2836
2837 int foo(struct S *s)
2838 {
2839 if (s->d == 1)
2840 return *s->ip;
2841 else
2842 return *s->cp;
2843 }
2844
2845 on a strict alignment machine. In a valid program, foo will never be
2846 invoked on a structure for which d is equal to 1 and the underlying
2847 unique field of the union is not aligned on a 4-byte boundary, but the
2848 expression *s->ip might cause a fault if considered individually.
2849
2850 At the RTL level, potentially problematic expressions will almost always
2851 satisfy may_trap_p; for example, the above dereference can be emitted as
2852 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2853 However, suppose that foo is inlined in a caller that causes s->cp to
2854 point to a local character variable and guarantees that s->d is not set
2855 to 1; foo may have been effectively translated into pseudo-RTL as:
2856
2857 if ((reg:SI) == 1)
2858 (set (reg:SI) (mem:SI (%fp - 7)))
2859 else
2860 (set (reg:QI) (mem:QI (%fp - 7)))
2861
2862 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2863 memory reference to a stack slot, but it will certainly cause a fault
2864 on a strict alignment machine. */
2865
2866 int
2867 may_trap_or_fault_p (const_rtx x)
2868 {
2869 return may_trap_p_1 (x, 1);
2870 }
2871 \f
2872 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2873 i.e., an inequality. */
2874
2875 int
2876 inequality_comparisons_p (const_rtx x)
2877 {
2878 const char *fmt;
2879 int len, i;
2880 const enum rtx_code code = GET_CODE (x);
2881
2882 switch (code)
2883 {
2884 case REG:
2885 case SCRATCH:
2886 case PC:
2887 case CC0:
2888 CASE_CONST_ANY:
2889 case CONST:
2890 case LABEL_REF:
2891 case SYMBOL_REF:
2892 return 0;
2893
2894 case LT:
2895 case LTU:
2896 case GT:
2897 case GTU:
2898 case LE:
2899 case LEU:
2900 case GE:
2901 case GEU:
2902 return 1;
2903
2904 default:
2905 break;
2906 }
2907
2908 len = GET_RTX_LENGTH (code);
2909 fmt = GET_RTX_FORMAT (code);
2910
2911 for (i = 0; i < len; i++)
2912 {
2913 if (fmt[i] == 'e')
2914 {
2915 if (inequality_comparisons_p (XEXP (x, i)))
2916 return 1;
2917 }
2918 else if (fmt[i] == 'E')
2919 {
2920 int j;
2921 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2922 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2923 return 1;
2924 }
2925 }
2926
2927 return 0;
2928 }
2929 \f
2930 /* Replace any occurrence of FROM in X with TO. The function does
2931 not enter into CONST_DOUBLE for the replace.
2932
2933 Note that copying is not done, so X must not be shared unless all copies
2934 are to be modified.
2935
2936 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
2937 the pointer-equal ones. */
2938
2939 rtx
2940 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
2941 {
2942 int i, j;
2943 const char *fmt;
2944
2945 if (x == from)
2946 return to;
2947
2948 /* Allow this function to make replacements in EXPR_LISTs. */
2949 if (x == 0)
2950 return 0;
2951
2952 if (all_regs
2953 && REG_P (x)
2954 && REG_P (from)
2955 && REGNO (x) == REGNO (from))
2956 {
2957 gcc_assert (GET_MODE (x) == GET_MODE (from));
2958 return to;
2959 }
2960 else if (GET_CODE (x) == SUBREG)
2961 {
2962 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
2963
2964 if (CONST_INT_P (new_rtx))
2965 {
2966 x = simplify_subreg (GET_MODE (x), new_rtx,
2967 GET_MODE (SUBREG_REG (x)),
2968 SUBREG_BYTE (x));
2969 gcc_assert (x);
2970 }
2971 else
2972 SUBREG_REG (x) = new_rtx;
2973
2974 return x;
2975 }
2976 else if (GET_CODE (x) == ZERO_EXTEND)
2977 {
2978 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
2979
2980 if (CONST_INT_P (new_rtx))
2981 {
2982 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2983 new_rtx, GET_MODE (XEXP (x, 0)));
2984 gcc_assert (x);
2985 }
2986 else
2987 XEXP (x, 0) = new_rtx;
2988
2989 return x;
2990 }
2991
2992 fmt = GET_RTX_FORMAT (GET_CODE (x));
2993 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2994 {
2995 if (fmt[i] == 'e')
2996 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
2997 else if (fmt[i] == 'E')
2998 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2999 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3000 from, to, all_regs);
3001 }
3002
3003 return x;
3004 }
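
/* Illustrative sketch, not part of the original sources: substituting a
   known constant for a pseudo throughout a copied pattern might look like

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, pseudo, GEN_INT (0), false);

   INSN and PSEUDO are hypothetical; the copy is taken first because
   replace_rtx modifies its argument in place.  */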
3005 \f
3006 /* Replace occurrences of OLD_LABEL in *LOC with NEW_LABEL. Also track
3007 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3008
3009 void
3010 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3011 {
3012 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3013 rtx x = *loc;
3014 if (JUMP_TABLE_DATA_P (x))
3015 {
3016 x = PATTERN (x);
3017 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3018 int len = GET_NUM_ELEM (vec);
3019 for (int i = 0; i < len; ++i)
3020 {
3021 rtx ref = RTVEC_ELT (vec, i);
3022 if (XEXP (ref, 0) == old_label)
3023 {
3024 XEXP (ref, 0) = new_label;
3025 if (update_label_nuses)
3026 {
3027 ++LABEL_NUSES (new_label);
3028 --LABEL_NUSES (old_label);
3029 }
3030 }
3031 }
3032 return;
3033 }
3034
3035 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3036 field. This is not handled by the iterator because it doesn't
3037 handle unprinted ('0') fields. */
3038 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3039 JUMP_LABEL (x) = new_label;
3040
3041 subrtx_ptr_iterator::array_type array;
3042 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3043 {
3044 rtx *loc = *iter;
3045 if (rtx x = *loc)
3046 {
3047 if (GET_CODE (x) == SYMBOL_REF
3048 && CONSTANT_POOL_ADDRESS_P (x))
3049 {
3050 rtx c = get_pool_constant (x);
3051 if (rtx_referenced_p (old_label, c))
3052 {
3053 /* Create a copy of constant C; replace the label inside
3054 but do not update LABEL_NUSES because uses in constant pool
3055 are not counted. */
3056 rtx new_c = copy_rtx (c);
3057 replace_label (&new_c, old_label, new_label, false);
3058
3059 /* Add the new constant NEW_C to constant pool and replace
3060 the old reference to constant by new reference. */
3061 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3062 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3063 }
3064 }
3065
3066 if ((GET_CODE (x) == LABEL_REF
3067 || GET_CODE (x) == INSN_LIST)
3068 && XEXP (x, 0) == old_label)
3069 {
3070 XEXP (x, 0) = new_label;
3071 if (update_label_nuses)
3072 {
3073 ++LABEL_NUSES (new_label);
3074 --LABEL_NUSES (old_label);
3075 }
3076 }
3077 }
3078 }
3079 }
3080
3081 void
3082 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
3083 bool update_label_nuses)
3084 {
3085 rtx insn_as_rtx = insn;
3086 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3087 gcc_checking_assert (insn_as_rtx == insn);
3088 }
3089
3090 /* Return true if X is referenced in BODY. */
3091
3092 bool
3093 rtx_referenced_p (const_rtx x, const_rtx body)
3094 {
3095 subrtx_iterator::array_type array;
3096 FOR_EACH_SUBRTX (iter, array, body, ALL)
3097 if (const_rtx y = *iter)
3098 {
3099 /* Check if a label_ref Y refers to label X. */
3100 if (GET_CODE (y) == LABEL_REF
3101 && LABEL_P (x)
3102 && label_ref_label (y) == x)
3103 return true;
3104
3105 if (rtx_equal_p (x, y))
3106 return true;
3107
3108 /* If Y is a reference to pool constant traverse the constant. */
3109 if (GET_CODE (y) == SYMBOL_REF
3110 && CONSTANT_POOL_ADDRESS_P (y))
3111 iter.substitute (get_pool_constant (y));
3112 }
3113 return false;
3114 }
3115
3116 /* If INSN is a tablejump, return true and store the label preceding the
3117 jump table in *LABELP and the jump table in *TABLEP; either may be NULL. */
3118
3119 bool
3120 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3121 rtx_jump_table_data **tablep)
3122 {
3123 if (!JUMP_P (insn))
3124 return false;
3125
3126 rtx target = JUMP_LABEL (insn);
3127 if (target == NULL_RTX || ANY_RETURN_P (target))
3128 return false;
3129
3130 rtx_insn *label = as_a<rtx_insn *> (target);
3131 rtx_insn *table = next_insn (label);
3132 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3133 return false;
3134
3135 if (labelp)
3136 *labelp = label;
3137 if (tablep)
3138 *tablep = as_a <rtx_jump_table_data *> (table);
3139 return true;
3140 }
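
/* Illustrative sketch, not part of the original sources: a caller that
   needs to walk the labels of a jump table might do

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
         rtvec vec = table->get_labels ();
         ... iterate over the GET_NUM_ELEM (vec) label refs ...
       }

   where INSN is a hypothetical jump insn.  */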
3141
3142 /* A subroutine of computed_jump_p. Return 1 if X contains a REG or MEM or
3143 constant that is not in the constant pool and is not in the condition
3144 of an IF_THEN_ELSE. */
3145
3146 static int
3147 computed_jump_p_1 (const_rtx x)
3148 {
3149 const enum rtx_code code = GET_CODE (x);
3150 int i, j;
3151 const char *fmt;
3152
3153 switch (code)
3154 {
3155 case LABEL_REF:
3156 case PC:
3157 return 0;
3158
3159 case CONST:
3160 CASE_CONST_ANY:
3161 case SYMBOL_REF:
3162 case REG:
3163 return 1;
3164
3165 case MEM:
3166 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3167 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3168
3169 case IF_THEN_ELSE:
3170 return (computed_jump_p_1 (XEXP (x, 1))
3171 || computed_jump_p_1 (XEXP (x, 2)));
3172
3173 default:
3174 break;
3175 }
3176
3177 fmt = GET_RTX_FORMAT (code);
3178 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3179 {
3180 if (fmt[i] == 'e'
3181 && computed_jump_p_1 (XEXP (x, i)))
3182 return 1;
3183
3184 else if (fmt[i] == 'E')
3185 for (j = 0; j < XVECLEN (x, i); j++)
3186 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3187 return 1;
3188 }
3189
3190 return 0;
3191 }
3192
3193 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3194
3195 Tablejumps and casesi insns are not considered indirect jumps;
3196 we can recognize them by a (use (label_ref)). */
3197
3198 int
3199 computed_jump_p (const rtx_insn *insn)
3200 {
3201 int i;
3202 if (JUMP_P (insn))
3203 {
3204 rtx pat = PATTERN (insn);
3205
3206 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3207 if (JUMP_LABEL (insn) != NULL)
3208 return 0;
3209
3210 if (GET_CODE (pat) == PARALLEL)
3211 {
3212 int len = XVECLEN (pat, 0);
3213 int has_use_labelref = 0;
3214
3215 for (i = len - 1; i >= 0; i--)
3216 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3217 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3218 == LABEL_REF))
3219 {
3220 has_use_labelref = 1;
3221 break;
3222 }
3223
3224 if (! has_use_labelref)
3225 for (i = len - 1; i >= 0; i--)
3226 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3227 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3228 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3229 return 1;
3230 }
3231 else if (GET_CODE (pat) == SET
3232 && SET_DEST (pat) == pc_rtx
3233 && computed_jump_p_1 (SET_SRC (pat)))
3234 return 1;
3235 }
3236 return 0;
3237 }
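
/* Illustrative example, not from the original sources: an indirect jump
   through a register, e.g.

     (jump_insn (set (pc) (reg:DI 100)))

   is a computed jump, whereas a direct (set (pc) (label_ref ...)) or a
   tablejump with its (use (label_ref ...)) is not.  The register number
   is hypothetical.  */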
3238
3239 \f
3240
3241 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3242 the equivalent add insn and pass the result to FN, using DATA as the
3243 final argument. */
3244
3245 static int
3246 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3247 {
3248 rtx x = XEXP (mem, 0);
3249 switch (GET_CODE (x))
3250 {
3251 case PRE_INC:
3252 case POST_INC:
3253 {
3254 int size = GET_MODE_SIZE (GET_MODE (mem));
3255 rtx r1 = XEXP (x, 0);
3256 rtx c = gen_int_mode (size, GET_MODE (r1));
3257 return fn (mem, x, r1, r1, c, data);
3258 }
3259
3260 case PRE_DEC:
3261 case POST_DEC:
3262 {
3263 int size = GET_MODE_SIZE (GET_MODE (mem));
3264 rtx r1 = XEXP (x, 0);
3265 rtx c = gen_int_mode (-size, GET_MODE (r1));
3266 return fn (mem, x, r1, r1, c, data);
3267 }
3268
3269 case PRE_MODIFY:
3270 case POST_MODIFY:
3271 {
3272 rtx r1 = XEXP (x, 0);
3273 rtx add = XEXP (x, 1);
3274 return fn (mem, x, r1, add, NULL, data);
3275 }
3276
3277 default:
3278 gcc_unreachable ();
3279 }
3280 }
3281
3282 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3283 For each such autoinc operation found, call FN, passing it
3284 the innermost enclosing MEM, the operation itself, the RTX modified
3285 by the operation, two RTXs (the second may be NULL) that, once
3286 added, represent the value to be held by the modified RTX
3287 afterwards, and DATA. FN is to return 0 to continue the
3288 traversal or any other value to have it returned to the caller of
3289 for_each_inc_dec. */
3290
3291 int
3292 for_each_inc_dec (rtx x,
3293 for_each_inc_dec_fn fn,
3294 void *data)
3295 {
3296 subrtx_var_iterator::array_type array;
3297 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3298 {
3299 rtx mem = *iter;
3300 if (mem
3301 && MEM_P (mem)
3302 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3303 {
3304 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3305 if (res != 0)
3306 return res;
3307 iter.skip_subrtxes ();
3308 }
3309 }
3310 return 0;
3311 }
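
/* Illustrative sketch, not part of the original sources: counting the
   auto-inc operations in a pattern might look like

     static int
     count_inc_dec (rtx, rtx, rtx, rtx, rtx, void *data)
     {
       ++*(int *) data;
       return 0;   // zero means keep traversing
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_inc_dec, &n);

   INSN is a hypothetical insn; the callback parameters mirror the call
   made by for_each_inc_dec_find_inc_dec above.  */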
3312
3313 \f
3314 /* Searches X for any reference to REGNO, returning the rtx of the
3315 reference found if any. Otherwise, returns NULL_RTX. */
3316
3317 rtx
3318 regno_use_in (unsigned int regno, rtx x)
3319 {
3320 const char *fmt;
3321 int i, j;
3322 rtx tem;
3323
3324 if (REG_P (x) && REGNO (x) == regno)
3325 return x;
3326
3327 fmt = GET_RTX_FORMAT (GET_CODE (x));
3328 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3329 {
3330 if (fmt[i] == 'e')
3331 {
3332 if ((tem = regno_use_in (regno, XEXP (x, i))))
3333 return tem;
3334 }
3335 else if (fmt[i] == 'E')
3336 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3337 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3338 return tem;
3339 }
3340
3341 return NULL_RTX;
3342 }
3343
3344 /* Return a value indicating whether OP, an operand of a commutative
3345 operation, is preferred as the first or second operand. The more
3346 positive the value, the stronger the preference for being the first
3347 operand. */
3348
3349 int
3350 commutative_operand_precedence (rtx op)
3351 {
3352 enum rtx_code code = GET_CODE (op);
3353
3354 /* Constants always become the second operand. Prefer "nice" constants. */
3355 if (code == CONST_INT)
3356 return -8;
3357 if (code == CONST_WIDE_INT)
3358 return -7;
3359 if (code == CONST_DOUBLE)
3360 return -7;
3361 if (code == CONST_FIXED)
3362 return -7;
3363 op = avoid_constant_pool_reference (op);
3364 code = GET_CODE (op);
3365
3366 switch (GET_RTX_CLASS (code))
3367 {
3368 case RTX_CONST_OBJ:
3369 if (code == CONST_INT)
3370 return -6;
3371 if (code == CONST_WIDE_INT)
3372 return -6;
3373 if (code == CONST_DOUBLE)
3374 return -5;
3375 if (code == CONST_FIXED)
3376 return -5;
3377 return -4;
3378
3379 case RTX_EXTRA:
3380 /* SUBREGs of objects should come second. */
3381 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3382 return -3;
3383 return 0;
3384
3385 case RTX_OBJ:
3386 /* Complex expressions should come first, so decrease the priority
3387 of objects. Prefer pointer objects over non-pointer objects. */
3388 if ((REG_P (op) && REG_POINTER (op))
3389 || (MEM_P (op) && MEM_POINTER (op)))
3390 return -1;
3391 return -2;
3392
3393 case RTX_COMM_ARITH:
3394 /* Prefer operands that are themselves commutative to be first.
3395 This helps to make things linear. In particular,
3396 (and (and (reg) (reg)) (not (reg))) is canonical. */
3397 return 4;
3398
3399 case RTX_BIN_ARITH:
3400 /* If only one operand is a binary expression, it will be the first
3401 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3402 is canonical, although it will usually be further simplified. */
3403 return 2;
3404
3405 case RTX_UNARY:
3406 /* Then prefer NEG and NOT. */
3407 if (code == NEG || code == NOT)
3408 return 1;
3409 /* FALLTHRU */
3410
3411 default:
3412 return 0;
3413 }
3414 }
3415
3416 /* Return true iff it is necessary to swap the operands of a commutative
3417 operation in order to canonicalize the expression. */
3418
3419 bool
3420 swap_commutative_operands_p (rtx x, rtx y)
3421 {
3422 return (commutative_operand_precedence (x)
3423 < commutative_operand_precedence (y));
3424 }
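
/* Illustrative sketch, not part of the original sources: callers typically
   canonicalize a commutative operation with

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   so that the operand with higher precedence ends up first.  OP0 and OP1
   are hypothetical locals.  */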
3425
3426 /* Return 1 if X is an autoincrement side effect and the register is
3427 not the stack pointer. */
3428 int
3429 auto_inc_p (const_rtx x)
3430 {
3431 switch (GET_CODE (x))
3432 {
3433 case PRE_INC:
3434 case POST_INC:
3435 case PRE_DEC:
3436 case POST_DEC:
3437 case PRE_MODIFY:
3438 case POST_MODIFY:
3439 /* There are no REG_INC notes for SP. */
3440 if (XEXP (x, 0) != stack_pointer_rtx)
3441 return 1;
3442 default:
3443 break;
3444 }
3445 return 0;
3446 }
3447
3448 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3449 int
3450 loc_mentioned_in_p (rtx *loc, const_rtx in)
3451 {
3452 enum rtx_code code;
3453 const char *fmt;
3454 int i, j;
3455
3456 if (!in)
3457 return 0;
3458
3459 code = GET_CODE (in);
3460 fmt = GET_RTX_FORMAT (code);
3461 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3462 {
3463 if (fmt[i] == 'e')
3464 {
3465 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3466 return 1;
3467 }
3468 else if (fmt[i] == 'E')
3469 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3470 if (loc == &XVECEXP (in, i, j)
3471 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3472 return 1;
3473 }
3474 return 0;
3475 }
3476
3477 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3478 and SUBREG_BYTE, return the bit offset where the subreg begins
3479 (counting from the least significant bit of the operand). */
3480
3481 unsigned int
3482 subreg_lsb_1 (machine_mode outer_mode,
3483 machine_mode inner_mode,
3484 unsigned int subreg_byte)
3485 {
3486 unsigned int bitpos;
3487 unsigned int byte;
3488 unsigned int word;
3489
3490 /* A paradoxical subreg begins at bit position 0. */
3491 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3492 return 0;
3493
3494 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3495 /* If the subreg crosses a word boundary ensure that
3496 it also begins and ends on a word boundary. */
3497 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3498 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3499 && (subreg_byte % UNITS_PER_WORD
3500 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3501
3502 if (WORDS_BIG_ENDIAN)
3503 word = (GET_MODE_SIZE (inner_mode)
3504 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3505 else
3506 word = subreg_byte / UNITS_PER_WORD;
3507 bitpos = word * BITS_PER_WORD;
3508
3509 if (BYTES_BIG_ENDIAN)
3510 byte = (GET_MODE_SIZE (inner_mode)
3511 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3512 else
3513 byte = subreg_byte % UNITS_PER_WORD;
3514 bitpos += byte * BITS_PER_UNIT;
3515
3516 return bitpos;
3517 }
3518
3519 /* Given a subreg X, return the bit offset where the subreg begins
3520 (counting from the least significant bit of the reg). */
3521
3522 unsigned int
3523 subreg_lsb (const_rtx x)
3524 {
3525 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3526 SUBREG_BYTE (x));
3527 }
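
/* Illustrative example, not from the original sources: on a little-endian
   target with 4-byte words, (subreg:SI (reg:DI 100) 4) has subreg_lsb 32:
   byte offset 4 selects word 1, so the subreg starts at bit
   1 * BITS_PER_WORD.  On a big-endian target the same byte offset selects
   the least significant word instead, giving bit offset 0.  The register
   number is hypothetical.  */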
3528
3529 /* Fill in information about a subreg of a hard register.
3530 xregno - A regno of an inner hard subreg_reg (or what will become one).
3531 xmode - The mode of xregno.
3532 offset - The byte offset.
3533 ymode - The mode of a top level SUBREG (or what may become one).
3534 info - Pointer to structure to fill in.
3535
3536 Rather than considering one particular inner register (and thus one
3537 particular "outer" register) in isolation, this function really uses
3538 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3539 function does not check whether adding INFO->offset to XREGNO gives
3540 a valid hard register; even if INFO->offset + XREGNO is out of range,
3541 there might be another register of the same type that is in range.
3542 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3543 register, since that can depend on things like whether the final
3544 register number is even or odd. Callers that want to check whether
3545 this particular subreg can be replaced by a simple (reg ...) should
3546 use simplify_subreg_regno. */
3547
3548 void
3549 subreg_get_info (unsigned int xregno, machine_mode xmode,
3550 unsigned int offset, machine_mode ymode,
3551 struct subreg_info *info)
3552 {
3553 int nregs_xmode, nregs_ymode;
3554 int mode_multiple, nregs_multiple;
3555 int offset_adj, y_offset, y_offset_adj;
3556 int regsize_xmode, regsize_ymode;
3557 bool rknown;
3558
3559 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3560
3561 rknown = false;
3562
3563 /* If there are holes in a non-scalar mode in registers, we expect
3564 that it is made up of its units concatenated together. */
3565 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3566 {
3567 machine_mode xmode_unit;
3568
3569 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3570 xmode_unit = GET_MODE_INNER (xmode);
3571 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3572 gcc_assert (nregs_xmode
3573 == (GET_MODE_NUNITS (xmode)
3574 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3575 gcc_assert (hard_regno_nregs[xregno][xmode]
3576 == (hard_regno_nregs[xregno][xmode_unit]
3577 * GET_MODE_NUNITS (xmode)));
3578
3579 /* You can only ask for a SUBREG of a value with holes in the middle
3580 if you don't cross the holes. (Such a SUBREG should be done by
3581 picking a different register class, or doing it in memory if
3582 necessary.) An example of a value with holes is XCmode on 32-bit
3583 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3584 3 for each part, but in memory it's two 128-bit parts.
3585 Padding is assumed to be at the end (not necessarily the 'high part')
3586 of each unit. */
3587 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3588 < GET_MODE_NUNITS (xmode))
3589 && (offset / GET_MODE_SIZE (xmode_unit)
3590 != ((offset + GET_MODE_SIZE (ymode) - 1)
3591 / GET_MODE_SIZE (xmode_unit))))
3592 {
3593 info->representable_p = false;
3594 rknown = true;
3595 }
3596 }
3597 else
3598 nregs_xmode = hard_regno_nregs[xregno][xmode];
3599
3600 nregs_ymode = hard_regno_nregs[xregno][ymode];
3601
3602 /* Paradoxical subregs are otherwise valid. */
3603 if (!rknown
3604 && offset == 0
3605 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3606 {
3607 info->representable_p = true;
3608 /* If this is a big endian paradoxical subreg, which uses more
3609 actual hard registers than the original register, we must
3610 return a negative offset so that we find the proper highpart
3611 of the register. */
3612 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3613 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3614 info->offset = nregs_xmode - nregs_ymode;
3615 else
3616 info->offset = 0;
3617 info->nregs = nregs_ymode;
3618 return;
3619 }
3620
3621 /* If registers store different numbers of bits in the different
3622 modes, we cannot generally form this subreg. */
3623 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3624 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3625 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3626 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3627 {
3628 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3629 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3630 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3631 {
3632 info->representable_p = false;
3633 info->nregs
3634 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3635 info->offset = offset / regsize_xmode;
3636 return;
3637 }
3638 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3639 {
3640 info->representable_p = false;
3641 info->nregs
3642 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3643 info->offset = offset / regsize_xmode;
3644 return;
3645 }
3646 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3647 would go outside of XMODE. */
3648 if (!rknown
3649 && GET_MODE_SIZE (ymode) + offset > GET_MODE_SIZE (xmode))
3650 {
3651 info->representable_p = false;
3652 info->nregs = nregs_ymode;
3653 info->offset = offset / regsize_xmode;
3654 return;
3655 }
3656 /* Quick exit for the simple and common case of extracting whole
3657 subregisters from a multiregister value. */
3658 /* ??? It would be better to integrate this into the code below,
3659 if we can generalize the concept enough and figure out how
3660 odd-sized modes can coexist with the other weird cases we support. */
3661 if (!rknown
3662 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3663 && regsize_xmode == regsize_ymode
3664 && (offset % regsize_ymode) == 0)
3665 {
3666 info->representable_p = true;
3667 info->nregs = nregs_ymode;
3668 info->offset = offset / regsize_ymode;
3669 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3670 return;
3671 }
3672 }
3673
3674 /* Lowpart subregs are otherwise valid. */
3675 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3676 {
3677 info->representable_p = true;
3678 rknown = true;
3679
3680 if (offset == 0 || nregs_xmode == nregs_ymode)
3681 {
3682 info->offset = 0;
3683 info->nregs = nregs_ymode;
3684 return;
3685 }
3686 }
3687
3688 /* This should always pass, otherwise we don't know how to verify
3689 the constraint. These conditions may be relaxed but
3690 subreg_regno_offset would need to be redesigned. */
3691 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3692 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3693
3694 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3695 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3696 {
3697 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3698 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3699 HOST_WIDE_INT off_low = offset & (ysize - 1);
3700 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3701 offset = (xsize - ysize - off_high) | off_low;
3702 }
3703 /* The XMODE value can be seen as a vector of NREGS_XMODE
3704 values. The subreg must represent a lowpart of a given field.
3705 Compute what field it is. */
3706 offset_adj = offset;
3707 offset_adj -= subreg_lowpart_offset (ymode,
3708 mode_for_size (GET_MODE_BITSIZE (xmode)
3709 / nregs_xmode,
3710 MODE_INT, 0));
3711
3712 /* Size of ymode must not be greater than the size of xmode. */
3713 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3714 gcc_assert (mode_multiple != 0);
3715
3716 y_offset = offset / GET_MODE_SIZE (ymode);
3717 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3718 nregs_multiple = nregs_xmode / nregs_ymode;
3719
3720 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3721 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3722
3723 if (!rknown)
3724 {
3725 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3726 rknown = true;
3727 }
3728 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3729 info->nregs = nregs_ymode;
3730 }
3731
3732 /* This function returns the regno offset of a subreg expression.
3733 xregno - A regno of an inner hard subreg_reg (or what will become one).
3734 xmode - The mode of xregno.
3735 offset - The byte offset.
3736 ymode - The mode of a top level SUBREG (or what may become one).
3737 RETURN - The regno offset which would be used. */
3738 unsigned int
3739 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3740 unsigned int offset, machine_mode ymode)
3741 {
3742 struct subreg_info info;
3743 subreg_get_info (xregno, xmode, offset, ymode, &info);
3744 return info.offset;
3745 }
3746
3747 /* This function returns true when the offset is representable via
3748 subreg_offset in the given regno.
3749 xregno - A regno of an inner hard subreg_reg (or what will become one).
3750 xmode - The mode of xregno.
3751 offset - The byte offset.
3752 ymode - The mode of a top level SUBREG (or what may become one).
3753 RETURN - Whether the offset is representable. */
3754 bool
3755 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3756 unsigned int offset, machine_mode ymode)
3757 {
3758 struct subreg_info info;
3759 subreg_get_info (xregno, xmode, offset, ymode, &info);
3760 return info.representable_p;
3761 }
3762
3763 /* Return the number of a YMODE register to which
3764
3765 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3766
3767 can be simplified. Return -1 if the subreg can't be simplified.
3768
3769 XREGNO is a hard register number. */
3770
3771 int
3772 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3773 unsigned int offset, machine_mode ymode)
3774 {
3775 struct subreg_info info;
3776 unsigned int yregno;
3777
3778 #ifdef CANNOT_CHANGE_MODE_CLASS
3779 /* Give the backend a chance to disallow the mode change. */
3780 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3781 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3782 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3783 /* We can use mode change in LRA for some transformations. */
3784 && ! lra_in_progress)
3785 return -1;
3786 #endif
3787
3788 /* We shouldn't simplify stack-related registers. */
3789 if ((!reload_completed || frame_pointer_needed)
3790 && xregno == FRAME_POINTER_REGNUM)
3791 return -1;
3792
3793 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3794 && xregno == ARG_POINTER_REGNUM)
3795 return -1;
3796
3797 if (xregno == STACK_POINTER_REGNUM
3798 /* We should convert the hard stack register in LRA if
3799 it is possible. */
3800 && ! lra_in_progress)
3801 return -1;
3802
3803 /* Try to get the register offset. */
3804 subreg_get_info (xregno, xmode, offset, ymode, &info);
3805 if (!info.representable_p)
3806 return -1;
3807
3808 /* Make sure that the offsetted register value is in range. */
3809 yregno = xregno + info.offset;
3810 if (!HARD_REGISTER_NUM_P (yregno))
3811 return -1;
3812
3813 /* See whether (reg:YMODE YREGNO) is valid.
3814
3815 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3816 This is a kludge to work around how complex FP arguments are passed
3817 on IA-64 and should be fixed. See PR target/49226. */
3818 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3819 && HARD_REGNO_MODE_OK (xregno, xmode))
3820 return -1;
3821
3822 return (int) yregno;
3823 }
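
/* Illustrative example, not from the original sources: on a hypothetical
   little-endian target where (reg:DI 0) occupies hard registers 0 and 1,

     simplify_subreg_regno (0, DImode, 4, SImode)

   would return 1 if (reg:SI 1) is a valid register, so that
   (subreg:SI (reg:DI 0) 4) can be rewritten as (reg:SI 1); it returns -1
   whenever the subreg is not representable as a single hard register.  */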
3824
3825 /* Return the final regno that a subreg expression refers to. */
3826 unsigned int
3827 subreg_regno (const_rtx x)
3828 {
3829 unsigned int ret;
3830 rtx subreg = SUBREG_REG (x);
3831 int regno = REGNO (subreg);
3832
3833 ret = regno + subreg_regno_offset (regno,
3834 GET_MODE (subreg),
3835 SUBREG_BYTE (x),
3836 GET_MODE (x));
3837 return ret;
3838
3839 }
3840
3841 /* Return the number of registers that a subreg expression refers
3842 to. */
3843 unsigned int
3844 subreg_nregs (const_rtx x)
3845 {
3846 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3847 }
3848
3849 /* Return the number of registers that subreg expression X, whose inner
3850 register has number REGNO, refers to. This is a copy of subreg_nregs
3851 above, changed so that the regno can be passed in. */
3852
3853 unsigned int
3854 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3855 {
3856 struct subreg_info info;
3857 rtx subreg = SUBREG_REG (x);
3858
3859 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3860 &info);
3861 return info.nregs;
3862 }
3863
3864
3865 struct parms_set_data
3866 {
3867 int nregs;
3868 HARD_REG_SET regs;
3869 };
3870
3871 /* Helper function for noticing stores to parameter registers. */
3872 static void
3873 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3874 {
3875 struct parms_set_data *const d = (struct parms_set_data *) data;
3876 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3877 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3878 {
3879 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3880 d->nregs--;
3881 }
3882 }
3883
3884 /* Look backward for first parameter to be loaded.
3885 Note that loads of all parameters will not necessarily be
3886 found if CSE has eliminated some of them (e.g., an argument
3887 to the outer function is passed down as a parameter).
3888 Do not skip BOUNDARY. */
3889 rtx_insn *
3890 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3891 {
3892 struct parms_set_data parm;
3893 rtx p;
3894 rtx_insn *before, *first_set;
3895
3896 /* Since different machines initialize their parameter registers
3897 in different orders, assume nothing. Collect the set of all
3898 parameter registers. */
3899 CLEAR_HARD_REG_SET (parm.regs);
3900 parm.nregs = 0;
3901 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3902 if (GET_CODE (XEXP (p, 0)) == USE
3903 && REG_P (XEXP (XEXP (p, 0), 0))
3904 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
3905 {
3906 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3907
3908 /* We only care about registers which can hold function
3909 arguments. */
3910 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3911 continue;
3912
3913 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3914 parm.nregs++;
3915 }
3916 before = call_insn;
3917 first_set = call_insn;
3918
3919 /* Search backward for the first set of a register in this set. */
3920 while (parm.nregs && before != boundary)
3921 {
3922 before = PREV_INSN (before);
3923
3924 /* It is possible that some loads got CSEed from one call to
3925 another. Stop in that case. */
3926 if (CALL_P (before))
3927 break;
3928
3929 /* Our caller must either ensure that we will find all sets
3930 (in case code has not been optimized yet), or take care
3931 of possible labels by setting BOUNDARY to the preceding
3932 CODE_LABEL. */
3933 if (LABEL_P (before))
3934 {
3935 gcc_assert (before == boundary);
3936 break;
3937 }
3938
3939 if (INSN_P (before))
3940 {
3941 int nregs_old = parm.nregs;
3942 note_stores (PATTERN (before), parms_set, &parm);
3943 /* If we found something that did not set a parameter reg,
3944 we're done. Do not keep going, as that might result
3945 in hoisting an insn before the setting of a pseudo
3946 that is used by the hoisted insn. */
3947 if (nregs_old != parm.nregs)
3948 first_set = before;
3949 else
3950 break;
3951 }
3952 }
3953 return first_set;
3954 }
3955
3956 /* Return true if we should avoid inserting code between INSN and preceding
3957 call instruction. */
3958
3959 bool
3960 keep_with_call_p (const rtx_insn *insn)
3961 {
3962 rtx set;
3963
3964 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3965 {
3966 if (REG_P (SET_DEST (set))
3967 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3968 && fixed_regs[REGNO (SET_DEST (set))]
3969 && general_operand (SET_SRC (set), VOIDmode))
3970 return true;
3971 if (REG_P (SET_SRC (set))
3972 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3973 && REG_P (SET_DEST (set))
3974 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3975 return true;
3976 /* There may be a stack pop just after the call and before the store
3977 of the return register. Search for the actual store when deciding
3978 if we can break or not. */
3979 if (SET_DEST (set) == stack_pointer_rtx)
3980 {
3981 /* This CONST_CAST is okay because next_nonnote_insn just
3982 returns its argument and we assign it to a const_rtx
3983 variable. */
3984 const rtx_insn *i2
3985 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3986 if (i2 && keep_with_call_p (i2))
3987 return true;
3988 }
3989 }
3990 return false;
3991 }
3992
3993 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3994 to non-complex jumps. That is, direct unconditional, conditional,
3995 and tablejumps, but not computed jumps or returns. It also does
3996 not apply to the fallthru case of a conditional jump. */
3997
3998 bool
3999 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4000 {
4001 rtx tmp = JUMP_LABEL (jump_insn);
4002 rtx_jump_table_data *table;
4003
4004 if (label == tmp)
4005 return true;
4006
4007 if (tablejump_p (jump_insn, NULL, &table))
4008 {
4009 rtvec vec = table->get_labels ();
4010 int i, veclen = GET_NUM_ELEM (vec);
4011
4012 for (i = 0; i < veclen; ++i)
4013 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4014 return true;
4015 }
4016
4017 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4018 return true;
4019
4020 return false;
4021 }
4022
4023 \f
4024 /* Return an estimate of the cost of computing rtx X.
4025 One use is in cse, to decide which expression to keep in the hash table.
4026 Another is in rtl generation, to pick the cheapest way to multiply.
4027 Other uses like the latter are expected in the future.
4028
4029 X appears as operand OPNO in an expression with code OUTER_CODE.
4030 SPEED specifies whether costs optimized for speed or size should
4031 be returned. */
4032
4033 int
4034 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4035 int opno, bool speed)
4036 {
4037 int i, j;
4038 enum rtx_code code;
4039 const char *fmt;
4040 int total;
4041 int factor;
4042
4043 if (x == 0)
4044 return 0;
4045
4046 if (GET_MODE (x) != VOIDmode)
4047 mode = GET_MODE (x);
4048
4049 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4050 many insns, taking N times as long. */
4051 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4052 if (factor == 0)
4053 factor = 1;
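/* As an illustrative sketch (the 32-bit word size is an assumed example,
   not target data): with UNITS_PER_WORD == 4, an SImode operation gets
   factor 1 and a DImode operation gets factor 2, so the default DImode
   MULT cost below is 2*2 * COSTS_N_INSNS (5), unless the target's
   rtx_costs hook overrides it.  */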
4054
4055 /* Compute the default costs of certain things.
4056 Note that targetm.rtx_costs can override the defaults. */
4057
4058 code = GET_CODE (x);
4059 switch (code)
4060 {
4061 case MULT:
4062 /* Multiplication has time-complexity O(N*N), where N is the
4063 number of units (translated from digits) when using
4064 schoolbook long multiplication. */
4065 total = factor * factor * COSTS_N_INSNS (5);
4066 break;
4067 case DIV:
4068 case UDIV:
4069 case MOD:
4070 case UMOD:
4071 /* Similarly, complexity for schoolbook long division. */
4072 total = factor * factor * COSTS_N_INSNS (7);
4073 break;
4074 case USE:
4075 /* Used in combine.c as a marker. */
4076 total = 0;
4077 break;
4078 case SET:
4079 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4080 the mode for the factor. */
4081 mode = GET_MODE (SET_DEST (x));
4082 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4083 if (factor == 0)
4084 factor = 1;
4085 /* FALLTHRU */
4086 default:
4087 total = factor * COSTS_N_INSNS (1);
4088 }
4089
4090 switch (code)
4091 {
4092 case REG:
4093 return 0;
4094
4095 case SUBREG:
4096 total = 0;
4097 /* If we can't tie these modes, make this expensive. The larger
4098 the mode, the more expensive it is. */
4099 if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4100 return COSTS_N_INSNS (2 + factor);
4101 break;
4102
4103 default:
4104 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4105 return total;
4106 break;
4107 }
4108
4109 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4110 which is already in total. */
4111
4112 fmt = GET_RTX_FORMAT (code);
4113 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4114 if (fmt[i] == 'e')
4115 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4116 else if (fmt[i] == 'E')
4117 for (j = 0; j < XVECLEN (x, i); j++)
4118 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4119
4120 return total;
4121 }
4122
4123 /* Fill in the structure C with information about both speed and size rtx
4124 costs for X, which is operand OPNO in an expression with code OUTER. */
4125
4126 void
4127 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4128 struct full_rtx_costs *c)
4129 {
4130 c->speed = rtx_cost (x, mode, outer, opno, true);
4131 c->size = rtx_cost (x, mode, outer, opno, false);
4132 }
4133
4134 \f
4135 /* Return cost of address expression X.
4136 Expect that X is a properly formed address reference.
4137
4138 The SPEED parameter specifies whether costs optimized for speed or size
4139 should be returned. */
4140
4141 int
4142 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4143 {
4144 /* We may be asked for the cost of various unusual addresses, such as the
4145 operands of a push instruction. It is not worthwhile to complicate the
4146 target hook with such cases.
4147
4148 if (!memory_address_addr_space_p (mode, x, as))
4149 return 1000;
4150
4151 return targetm.address_cost (x, mode, as, speed);
4152 }
4153
4154 /* If the target doesn't override, compute the cost as with arithmetic. */
4155
4156 int
4157 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4158 {
4159 return rtx_cost (x, Pmode, MEM, 0, speed);
4160 }
4161 \f
4162
4163 unsigned HOST_WIDE_INT
4164 nonzero_bits (const_rtx x, machine_mode mode)
4165 {
4166 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4167 }
4168
4169 unsigned int
4170 num_sign_bit_copies (const_rtx x, machine_mode mode)
4171 {
4172 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
4173 }
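/* Worked examples (illustrative only): in SImode, nonzero_bits of
   (and:SI X (const_int 255)) is at most 0xff, and num_sign_bit_copies
   of (sign_extend:SI (reg:QI R)) is at least 32 - 8 + 1 == 25.  */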
4174
4175 /* Return true if nonzero_bits1 might recurse into both operands
4176 of X. */
4177
4178 static inline bool
4179 nonzero_bits_binary_arith_p (const_rtx x)
4180 {
4181 if (!ARITHMETIC_P (x))
4182 return false;
4183 switch (GET_CODE (x))
4184 {
4185 case AND:
4186 case XOR:
4187 case IOR:
4188 case UMIN:
4189 case UMAX:
4190 case SMIN:
4191 case SMAX:
4192 case PLUS:
4193 case MINUS:
4194 case MULT:
4195 case DIV:
4196 case UDIV:
4197 case MOD:
4198 case UMOD:
4199 return true;
4200 default:
4201 return false;
4202 }
4203 }
4204
4205 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4206 It avoids exponential behavior in nonzero_bits1 when X has
4207 identical subexpressions on the first or the second level. */
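/* For instance (illustrative): in (plus:SI (reg A) (reg A)) both
   operands are the same rtx, so the nonzero bits of A are computed once
   and passed down as KNOWN_X/KNOWN_RET instead of recursing into A a
   second time.  */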
4208
4209 static unsigned HOST_WIDE_INT
4210 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4211 machine_mode known_mode,
4212 unsigned HOST_WIDE_INT known_ret)
4213 {
4214 if (x == known_x && mode == known_mode)
4215 return known_ret;
4216
4217 /* Try to find identical subexpressions. If found call
4218 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4219 precomputed value for the subexpression as KNOWN_RET. */
4220
4221 if (nonzero_bits_binary_arith_p (x))
4222 {
4223 rtx x0 = XEXP (x, 0);
4224 rtx x1 = XEXP (x, 1);
4225
4226 /* Check the first level. */
4227 if (x0 == x1)
4228 return nonzero_bits1 (x, mode, x0, mode,
4229 cached_nonzero_bits (x0, mode, known_x,
4230 known_mode, known_ret));
4231
4232 /* Check the second level. */
4233 if (nonzero_bits_binary_arith_p (x0)
4234 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4235 return nonzero_bits1 (x, mode, x1, mode,
4236 cached_nonzero_bits (x1, mode, known_x,
4237 known_mode, known_ret));
4238
4239 if (nonzero_bits_binary_arith_p (x1)
4240 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4241 return nonzero_bits1 (x, mode, x0, mode,
4242 cached_nonzero_bits (x0, mode, known_x,
4243 known_mode, known_ret));
4244 }
4245
4246 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4247 }
4248
4249 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4250 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4251 is less useful. We can't allow both, because that results in exponential
4252 run time recursion. There is a nullstone testcase that triggered
4253 this. This macro avoids accidental uses of num_sign_bit_copies. */
4254 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4255
4256 /* Given an expression, X, compute which bits in X can be nonzero.
4257 We don't care about bits outside of those defined in MODE.
4258
4259 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4260 an arithmetic operation, we can do better. */
4261
4262 static unsigned HOST_WIDE_INT
4263 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4264 machine_mode known_mode,
4265 unsigned HOST_WIDE_INT known_ret)
4266 {
4267 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4268 unsigned HOST_WIDE_INT inner_nz;
4269 enum rtx_code code;
4270 machine_mode inner_mode;
4271 unsigned int mode_width = GET_MODE_PRECISION (mode);
4272
4273 /* For floating-point and vector values, assume all bits are needed. */
4274 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4275 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4276 return nonzero;
4277
4278 /* If X is wider than MODE, use its mode instead. */
4279 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4280 {
4281 mode = GET_MODE (x);
4282 nonzero = GET_MODE_MASK (mode);
4283 mode_width = GET_MODE_PRECISION (mode);
4284 }
4285
4286 if (mode_width > HOST_BITS_PER_WIDE_INT)
4287 /* Our only callers in this case look for single bit values. So
4288 just return the mode mask. Those tests will then be false. */
4289 return nonzero;
4290
4291 /* If MODE is wider than X, but both are a single word for both the host
4292 and target machines, we can compute this from which bits of the
4293 object might be nonzero in its own mode, taking into account the fact
4294 that on many CISC machines, accessing an object in a wider mode
4295 causes the high-order bits to become undefined. So they are
4296 not known to be zero. */
4297
4298 if (!WORD_REGISTER_OPERATIONS
4299 && GET_MODE (x) != VOIDmode
4300 && GET_MODE (x) != mode
4301 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4302 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4303 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4304 {
4305 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4306 known_x, known_mode, known_ret);
4307 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4308 return nonzero;
4309 }
4310
4311 /* Please keep nonzero_bits_binary_arith_p above in sync with
4312 the code in the switch below. */
4313 code = GET_CODE (x);
4314 switch (code)
4315 {
4316 case REG:
4317 #if defined(POINTERS_EXTEND_UNSIGNED)
4318 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4319 all the bits above ptr_mode are known to be zero. */
4320 /* As we do not know which address space the pointer is referring to,
4321 we can do this only if the target does not support different pointer
4322 or address modes depending on the address space. */
4323 if (target_default_pointer_address_modes_p ()
4324 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4325 && REG_POINTER (x)
4326 && !targetm.have_ptr_extend ())
4327 nonzero &= GET_MODE_MASK (ptr_mode);
4328 #endif
4329
4330 /* Include declared information about alignment of pointers. */
4331 /* ??? We don't properly preserve REG_POINTER changes across
4332 pointer-to-integer casts, so we can't trust it except for
4333 things that we know must be pointers. See execute/960116-1.c. */
4334 if ((x == stack_pointer_rtx
4335 || x == frame_pointer_rtx
4336 || x == arg_pointer_rtx)
4337 && REGNO_POINTER_ALIGN (REGNO (x)))
4338 {
4339 unsigned HOST_WIDE_INT alignment
4340 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4341
4342 #ifdef PUSH_ROUNDING
4343 /* If PUSH_ROUNDING is defined, it is possible for the
4344 stack to be momentarily aligned only to that amount,
4345 so we pick the least alignment. */
4346 if (x == stack_pointer_rtx && PUSH_ARGS)
4347 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4348 alignment);
4349 #endif
4350
4351 nonzero &= ~(alignment - 1);
4352 }
4353
4354 {
4355 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4356 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4357 known_mode, known_ret,
4358 &nonzero_for_hook);
4359
4360 if (new_rtx)
4361 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4362 known_mode, known_ret);
4363
4364 return nonzero_for_hook;
4365 }
4366
4367 case CONST_INT:
4368 /* If X is negative in MODE, sign-extend the value. */
4369 if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
4370 && mode_width < BITS_PER_WORD
4371 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
4372 != 0)
4373 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4374
4375 return UINTVAL (x);
4376
4377 case MEM:
4378 /* On many, if not most, RISC machines, reading a byte from memory
4379 zeros the rest of the register. Noticing that fact saves a lot
4380 of extra zero-extends. */
4381 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4382 nonzero &= GET_MODE_MASK (GET_MODE (x));
4383 break;
4384
4385 case EQ: case NE:
4386 case UNEQ: case LTGT:
4387 case GT: case GTU: case UNGT:
4388 case LT: case LTU: case UNLT:
4389 case GE: case GEU: case UNGE:
4390 case LE: case LEU: case UNLE:
4391 case UNORDERED: case ORDERED:
4392 /* If this produces an integer result, we know which bits are set.
4393 Code here used to clear bits outside the mode of X, but that is
4394 now done above. */
4395 /* Mind that MODE is the mode the caller wants to look at this
4396 operation in, and not the actual operation mode. We can wind
4397 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4398 that describes the results of a vector compare. */
4399 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4400 && mode_width <= HOST_BITS_PER_WIDE_INT)
4401 nonzero = STORE_FLAG_VALUE;
4402 break;
4403
4404 case NEG:
4405 #if 0
4406 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4407 and num_sign_bit_copies. */
4408 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4409 == GET_MODE_PRECISION (GET_MODE (x)))
4410 nonzero = 1;
4411 #endif
4412
4413 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4414 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4415 break;
4416
4417 case ABS:
4418 #if 0
4419 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4420 and num_sign_bit_copies. */
4421 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4422 == GET_MODE_PRECISION (GET_MODE (x)))
4423 nonzero = 1;
4424 #endif
4425 break;
4426
4427 case TRUNCATE:
4428 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4429 known_x, known_mode, known_ret)
4430 & GET_MODE_MASK (mode));
4431 break;
4432
4433 case ZERO_EXTEND:
4434 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4435 known_x, known_mode, known_ret);
4436 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4437 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4438 break;
4439
4440 case SIGN_EXTEND:
4441 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4442 Otherwise, show that all the bits in the outer mode but not in the
4443 inner one may be nonzero. */
4444 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4445 known_x, known_mode, known_ret);
4446 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4447 {
4448 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4449 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4450 inner_nz |= (GET_MODE_MASK (mode)
4451 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4452 }
4453
4454 nonzero &= inner_nz;
4455 break;
4456
4457 case AND:
4458 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4459 known_x, known_mode, known_ret)
4460 & cached_nonzero_bits (XEXP (x, 1), mode,
4461 known_x, known_mode, known_ret);
4462 break;
4463
4464 case XOR: case IOR:
4465 case UMIN: case UMAX: case SMIN: case SMAX:
4466 {
4467 unsigned HOST_WIDE_INT nonzero0
4468 = cached_nonzero_bits (XEXP (x, 0), mode,
4469 known_x, known_mode, known_ret);
4470
4471 /* Don't call nonzero_bits for the second time if it cannot change
4472 anything. */
4473 if ((nonzero & nonzero0) != nonzero)
4474 nonzero &= nonzero0
4475 | cached_nonzero_bits (XEXP (x, 1), mode,
4476 known_x, known_mode, known_ret);
4477 }
4478 break;
4479
4480 case PLUS: case MINUS:
4481 case MULT:
4482 case DIV: case UDIV:
4483 case MOD: case UMOD:
4484 /* We can apply the rules of arithmetic to compute the number of
4485 high- and low-order zero bits of these operations. We start by
4486 computing the width (position of the highest-order nonzero bit)
4487 and the number of low-order zero bits for each value. */
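      /* Worked example (illustrative): if nz0 == 0x0f (width 4) and
	 nz1 == 0x06 (width 3, one low-order zero bit), then for PLUS
	 result_width is MAX (4, 3) + 1 == 5 and result_low is
	 MIN (0, 1) == 0, confining the nonzero bits to 0x1f.  */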
4488 {
4489 unsigned HOST_WIDE_INT nz0
4490 = cached_nonzero_bits (XEXP (x, 0), mode,
4491 known_x, known_mode, known_ret);
4492 unsigned HOST_WIDE_INT nz1
4493 = cached_nonzero_bits (XEXP (x, 1), mode,
4494 known_x, known_mode, known_ret);
4495 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4496 int width0 = floor_log2 (nz0) + 1;
4497 int width1 = floor_log2 (nz1) + 1;
4498 int low0 = ctz_or_zero (nz0);
4499 int low1 = ctz_or_zero (nz1);
4500 unsigned HOST_WIDE_INT op0_maybe_minusp
4501 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4502 unsigned HOST_WIDE_INT op1_maybe_minusp
4503 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4504 unsigned int result_width = mode_width;
4505 int result_low = 0;
4506
4507 switch (code)
4508 {
4509 case PLUS:
4510 result_width = MAX (width0, width1) + 1;
4511 result_low = MIN (low0, low1);
4512 break;
4513 case MINUS:
4514 result_low = MIN (low0, low1);
4515 break;
4516 case MULT:
4517 result_width = width0 + width1;
4518 result_low = low0 + low1;
4519 break;
4520 case DIV:
4521 if (width1 == 0)
4522 break;
4523 if (!op0_maybe_minusp && !op1_maybe_minusp)
4524 result_width = width0;
4525 break;
4526 case UDIV:
4527 if (width1 == 0)
4528 break;
4529 result_width = width0;
4530 break;
4531 case MOD:
4532 if (width1 == 0)
4533 break;
4534 if (!op0_maybe_minusp && !op1_maybe_minusp)
4535 result_width = MIN (width0, width1);
4536 result_low = MIN (low0, low1);
4537 break;
4538 case UMOD:
4539 if (width1 == 0)
4540 break;
4541 result_width = MIN (width0, width1);
4542 result_low = MIN (low0, low1);
4543 break;
4544 default:
4545 gcc_unreachable ();
4546 }
4547
4548 if (result_width < mode_width)
4549 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4550
4551 if (result_low > 0)
4552 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4553 }
4554 break;
4555
4556 case ZERO_EXTRACT:
4557 if (CONST_INT_P (XEXP (x, 1))
4558 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4559 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4560 break;
4561
4562 case SUBREG:
4563 /* If this is a SUBREG formed for a promoted variable that has
4564 been zero-extended, we know that at least the high-order bits
4565 are zero, though others might be too. */
4566 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4567 nonzero = GET_MODE_MASK (GET_MODE (x))
4568 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4569 known_x, known_mode, known_ret);
4570
4571 /* If the inner mode is a single word for both the host and target
4572 machines, we can compute this from which bits of the inner
4573 object might be nonzero. */
4574 inner_mode = GET_MODE (SUBREG_REG (x));
4575 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4576 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
4577 {
4578 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4579 known_x, known_mode, known_ret);
4580
4581 /* On many CISC machines, accessing an object in a wider mode
4582 causes the high-order bits to become undefined. So they are
4583 not known to be zero. */
4584 if ((!WORD_REGISTER_OPERATIONS
4585 /* If this is a typical RISC machine, we only have to worry
4586 about the way loads are extended. */
4587 || (LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4588 ? val_signbit_known_set_p (inner_mode, nonzero)
4589 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4590 || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4591 && GET_MODE_PRECISION (GET_MODE (x))
4592 > GET_MODE_PRECISION (inner_mode))
4593 nonzero
4594 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4595 }
4596 break;
4597
4598 case ASHIFTRT:
4599 case LSHIFTRT:
4600 case ASHIFT:
4601 case ROTATE:
4602 /* The nonzero bits are in two classes: any bits within MODE
4603 that aren't in GET_MODE (x) are always significant. The rest of the
4604 nonzero bits are those that are significant in the operand of
4605 the shift when shifted the appropriate number of bits. This
4606 shows that high-order bits are cleared by the right shift and
4607 low-order bits by left shifts. */
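      /* E.g. (illustrative): for (lshiftrt:SI X (const_int 24)), the
	 inner nonzero bits are shifted right by 24, so at most the low
	 eight bits of the result can be nonzero.  */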
4608 if (CONST_INT_P (XEXP (x, 1))
4609 && INTVAL (XEXP (x, 1)) >= 0
4610 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4611 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4612 {
4613 machine_mode inner_mode = GET_MODE (x);
4614 unsigned int width = GET_MODE_PRECISION (inner_mode);
4615 int count = INTVAL (XEXP (x, 1));
4616 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4617 unsigned HOST_WIDE_INT op_nonzero
4618 = cached_nonzero_bits (XEXP (x, 0), mode,
4619 known_x, known_mode, known_ret);
4620 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4621 unsigned HOST_WIDE_INT outer = 0;
4622
4623 if (mode_width > width)
4624 outer = (op_nonzero & nonzero & ~mode_mask);
4625
4626 if (code == LSHIFTRT)
4627 inner >>= count;
4628 else if (code == ASHIFTRT)
4629 {
4630 inner >>= count;
4631
4632 /* If the sign bit may have been nonzero before the shift, we
4633 need to mark all the places it could have been copied to
4634 by the shift as possibly nonzero. */
4635 if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
4636 inner |= ((HOST_WIDE_INT_1U << count) - 1)
4637 << (width - count);
4638 }
4639 else if (code == ASHIFT)
4640 inner <<= count;
4641 else
4642 inner = ((inner << (count % width)
4643 | (inner >> (width - (count % width)))) & mode_mask);
4644
4645 nonzero &= (outer | inner);
4646 }
4647 break;
4648
4649 case FFS:
4650 case POPCOUNT:
4651 /* This is at most the number of bits in the mode. */
4652 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
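      /* E.g. (illustrative): for a 32-bit mode, floor_log2 (32) == 5 and
	 nonzero becomes 63, enough to cover any FFS or POPCOUNT result,
	 which never exceeds 32.  */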
4653 break;
4654
4655 case CLZ:
4656 /* If CLZ has a known value at zero, then the nonzero bits are
4657 that value, plus the number of bits in the mode minus one. */
4658 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4659 nonzero
4660 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4661 else
4662 nonzero = -1;
4663 break;
4664
4665 case CTZ:
4666 /* If CTZ has a known value at zero, then the nonzero bits are
4667 that value, plus the number of bits in the mode minus one. */
4668 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4669 nonzero
4670 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4671 else
4672 nonzero = -1;
4673 break;
4674
4675 case CLRSB:
4676 /* This is at most the number of bits in the mode minus 1. */
4677 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4678 break;
4679
4680 case PARITY:
4681 nonzero = 1;
4682 break;
4683
4684 case IF_THEN_ELSE:
4685 {
4686 unsigned HOST_WIDE_INT nonzero_true
4687 = cached_nonzero_bits (XEXP (x, 1), mode,
4688 known_x, known_mode, known_ret);
4689
4690 /* Don't call nonzero_bits for the second time if it cannot change
4691 anything. */
4692 if ((nonzero & nonzero_true) != nonzero)
4693 nonzero &= nonzero_true
4694 | cached_nonzero_bits (XEXP (x, 2), mode,
4695 known_x, known_mode, known_ret);
4696 }
4697 break;
4698
4699 default:
4700 break;
4701 }
4702
4703 return nonzero;
4704 }
4705
4706 /* See the macro definition above. */
4707 #undef cached_num_sign_bit_copies
4708
4709 \f
4710 /* Return true if num_sign_bit_copies1 might recurse into both operands
4711 of X. */
4712
4713 static inline bool
4714 num_sign_bit_copies_binary_arith_p (const_rtx x)
4715 {
4716 if (!ARITHMETIC_P (x))
4717 return false;
4718 switch (GET_CODE (x))
4719 {
4720 case IOR:
4721 case AND:
4722 case XOR:
4723 case SMIN:
4724 case SMAX:
4725 case UMIN:
4726 case UMAX:
4727 case PLUS:
4728 case MINUS:
4729 case MULT:
4730 return true;
4731 default:
4732 return false;
4733 }
4734 }
4735
4736 /* The function cached_num_sign_bit_copies is a wrapper around
4737 num_sign_bit_copies1. It avoids exponential behavior in
4738 num_sign_bit_copies1 when X has identical subexpressions on the
4739 first or the second level. */
4740
4741 static unsigned int
4742 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4743 machine_mode known_mode,
4744 unsigned int known_ret)
4745 {
4746 if (x == known_x && mode == known_mode)
4747 return known_ret;
4748
4749 /* Try to find identical subexpressions. If found call
4750 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4751 the precomputed value for the subexpression as KNOWN_RET. */
4752
4753 if (num_sign_bit_copies_binary_arith_p (x))
4754 {
4755 rtx x0 = XEXP (x, 0);
4756 rtx x1 = XEXP (x, 1);
4757
4758 /* Check the first level. */
4759 if (x0 == x1)
4760 return
4761 num_sign_bit_copies1 (x, mode, x0, mode,
4762 cached_num_sign_bit_copies (x0, mode, known_x,
4763 known_mode,
4764 known_ret));
4765
4766 /* Check the second level. */
4767 if (num_sign_bit_copies_binary_arith_p (x0)
4768 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4769 return
4770 num_sign_bit_copies1 (x, mode, x1, mode,
4771 cached_num_sign_bit_copies (x1, mode, known_x,
4772 known_mode,
4773 known_ret));
4774
4775 if (num_sign_bit_copies_binary_arith_p (x1)
4776 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4777 return
4778 num_sign_bit_copies1 (x, mode, x0, mode,
4779 cached_num_sign_bit_copies (x0, mode, known_x,
4780 known_mode,
4781 known_ret));
4782 }
4783
4784 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4785 }
4786
4787 /* Return the number of bits at the high-order end of X that are known to
4788 be equal to the sign bit. X will be used in mode MODE; if MODE is
4789 VOIDmode, X will be used in its own mode. The returned value will always
4790 be between 1 and the number of bits in MODE. */
4791
4792 static unsigned int
4793 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4794 machine_mode known_mode,
4795 unsigned int known_ret)
4796 {
4797 enum rtx_code code = GET_CODE (x);
4798 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4799 machine_mode inner_mode;
4800 int num0, num1, result;
4801 unsigned HOST_WIDE_INT nonzero;
4802
4803 /* If we weren't given a mode, use the mode of X. If the mode is still
4804 VOIDmode, we don't know anything. Likewise if one of the modes is
4805 floating-point. */
4806
4807 if (mode == VOIDmode)
4808 mode = GET_MODE (x);
4809
4810 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4811 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4812 return 1;
4813
4814 /* For a smaller object, just ignore the high bits. */
4815 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4816 {
4817 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4818 known_x, known_mode, known_ret);
4819 return MAX (1,
4820 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4821 }
4822
4823 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4824 {
4825 /* If this machine does not do all register operations on the entire
4826 register and MODE is wider than the mode of X, we can say nothing
4827 at all about the high-order bits. */
4828 if (!WORD_REGISTER_OPERATIONS)
4829 return 1;
4830
4831 /* Likewise on machines that do, if the mode of the object is smaller
4832 than a word and loads of that size don't sign extend, we can say
4833 nothing about the high order bits. */
4834 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4835 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND)
4836 return 1;
4837 }
4838
4839 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
4840 the code in the switch below. */
4841 switch (code)
4842 {
4843 case REG:
4844
4845 #if defined(POINTERS_EXTEND_UNSIGNED)
4846 /* If pointers extend signed and this is a pointer in Pmode, say that
4847 all the bits above ptr_mode are known to be sign bit copies. */
4848 /* As we do not know which address space the pointer is referring to,
4849 we can do this only if the target does not support different pointer
4850 or address modes depending on the address space. */
4851 if (target_default_pointer_address_modes_p ()
4852 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4853 && mode == Pmode && REG_POINTER (x)
4854 && !targetm.have_ptr_extend ())
4855 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4856 #endif
4857
4858 {
4859 unsigned int copies_for_hook = 1, copies = 1;
4860 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4861 known_mode, known_ret,
4862 &copies_for_hook);
4863
4864 if (new_rtx)
4865 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4866 known_mode, known_ret);
4867
4868 if (copies > 1 || copies_for_hook > 1)
4869 return MAX (copies, copies_for_hook);
4870
4871 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4872 }
4873 break;
4874
4875 case MEM:
4876 /* Some RISC machines sign-extend all loads smaller than a word. */
4877 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4878 return MAX (1, ((int) bitwidth
4879 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4880 break;
4881
4882 case CONST_INT:
4883 /* If the constant is negative, take its 1's complement and remask.
4884 Then see how many zero bits we have. */
4885 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4886 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4887 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
4888 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4889
4890 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4891
4892 case SUBREG:
4893 /* If this is a SUBREG for a promoted object that is sign-extended
4894 and we are looking at it in a wider mode, we know that at least the
4895 high-order bits are known to be sign bit copies. */
4896
4897 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4898 {
4899 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4900 known_x, known_mode, known_ret);
4901 return MAX ((int) bitwidth
4902 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4903 num0);
4904 }
4905
4906 /* For a smaller object, just ignore the high bits. */
4907 inner_mode = GET_MODE (SUBREG_REG (x));
4908 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
4909 {
4910 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4911 known_x, known_mode, known_ret);
4912 return
4913 MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
4914 }
4915
4916 /* For paradoxical SUBREGs on machines where all register operations
4917 affect the entire register, just look inside. Note that we are
4918 passing MODE to the recursive call, so the number of sign bit copies
4919 will remain relative to that mode, not the inner mode. */
4920
4921 /* This works only if loads sign extend. Otherwise, if we get a
4922 reload for the inner part, it may be loaded from the stack, and
4923 then we lose all sign bit copies that existed before the store
4924 to the stack. */
4925
4926 if (WORD_REGISTER_OPERATIONS
4927 && GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4928 && LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4929 && paradoxical_subreg_p (x)
4930 && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
4931 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4932 known_x, known_mode, known_ret);
4933 break;
4934
4935 case SIGN_EXTRACT:
4936 if (CONST_INT_P (XEXP (x, 1)))
4937 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4938 break;
4939
4940 case SIGN_EXTEND:
4941 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4942 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4943 known_x, known_mode, known_ret));
4944
4945 case TRUNCATE:
4946 /* For a smaller object, just ignore the high bits. */
4947 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4948 known_x, known_mode, known_ret);
4949 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4950 - bitwidth)));
4951
4952 case NOT:
4953 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4954 known_x, known_mode, known_ret);
4955
4956 case ROTATE: case ROTATERT:
4957 /* If we are rotating left by a number of bits less than the number
4958 of sign bit copies, we can just subtract that amount from the
4959 number. */
4960 if (CONST_INT_P (XEXP (x, 1))
4961 && INTVAL (XEXP (x, 1)) >= 0
4962 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4963 {
4964 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4965 known_x, known_mode, known_ret);
4966 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4967 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4968 }
4969 break;
4970
4971 case NEG:
4972 /* In general, this subtracts one sign bit copy. But if the value
4973 is known to be positive, the number of sign bit copies is the
4974 same as that of the input. Finally, if the input has just one bit
4975 that might be nonzero, all the bits are copies of the sign bit. */
4976 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4977 known_x, known_mode, known_ret);
4978 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4979 return num0 > 1 ? num0 - 1 : 1;
4980
4981 nonzero = nonzero_bits (XEXP (x, 0), mode);
4982 if (nonzero == 1)
4983 return bitwidth;
4984
4985 if (num0 > 1
4986 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
4987 num0--;
4988
4989 return num0;
4990
4991 case IOR: case AND: case XOR:
4992 case SMIN: case SMAX: case UMIN: case UMAX:
4993 /* Logical operations will preserve the number of sign-bit copies.
4994 MIN and MAX operations always return one of the operands. */
4995 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4996 known_x, known_mode, known_ret);
4997 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4998 known_x, known_mode, known_ret);
4999
5000 /* If num1 is clearing some of the top bits then regardless of
5001 the other term, we are guaranteed to have at least that many
5002 high-order zero bits. */
5003 if (code == AND
5004 && num1 > 1
5005 && bitwidth <= HOST_BITS_PER_WIDE_INT
5006 && CONST_INT_P (XEXP (x, 1))
5007 && (UINTVAL (XEXP (x, 1))
5008 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5009 return num1;
5010
5011 /* Similarly for IOR when setting high-order bits. */
5012 if (code == IOR
5013 && num1 > 1
5014 && bitwidth <= HOST_BITS_PER_WIDE_INT
5015 && CONST_INT_P (XEXP (x, 1))
5016 && (UINTVAL (XEXP (x, 1))
5017 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5018 return num1;
5019
5020 return MIN (num0, num1);
5021
5022 case PLUS: case MINUS:
5023 /* For addition and subtraction, we can have a 1-bit carry. However,
5024 if we are subtracting 1 from a positive number, there will not
5025 be such a carry. Furthermore, if the positive number is known to
5026 be 0 or 1, we know the result is either -1 or 0. */
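      /* For example (illustrative): if nonzero_bits shows the first
	 operand is 0 or 1, then adding -1 yields -1 or 0, every bit is
	 a copy of the sign bit, and the full bitwidth is returned.  */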
5027
5028 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5029 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5030 {
5031 nonzero = nonzero_bits (XEXP (x, 0), mode);
5032 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5033 return (nonzero == 1 || nonzero == 0 ? bitwidth
5034 : bitwidth - floor_log2 (nonzero) - 1);
5035 }
5036
5037 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5038 known_x, known_mode, known_ret);
5039 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5040 known_x, known_mode, known_ret);
5041 result = MAX (1, MIN (num0, num1) - 1);
5042
5043 return result;
5044
5045 case MULT:
5046 /* The number of bits of the product is the sum of the number of
5047 bits of both terms. However, unless one of the terms is known
5048 to be positive, we must allow for an additional bit since negating
5049 a negative number can remove one sign bit copy. */
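      /* E.g. (illustrative): two HImode operands with 12 sign bit
	 copies each give 16 - (16 - 12) - (16 - 12) == 8 copies,
	 minus one more if both operands might be negative.  */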
5050
5051 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5052 known_x, known_mode, known_ret);
5053 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5054 known_x, known_mode, known_ret);
5055
5056 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5057 if (result > 0
5058 && (bitwidth > HOST_BITS_PER_WIDE_INT
5059 || (((nonzero_bits (XEXP (x, 0), mode)
5060 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5061 && ((nonzero_bits (XEXP (x, 1), mode)
5062 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5063 != 0))))
5064 result--;
5065
5066 return MAX (1, result);
5067
5068 case UDIV:
5069 /* The result must be <= the first operand. If the first operand
5070 has the high bit set, we know nothing about the number of sign
5071 bit copies. */
5072 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5073 return 1;
5074 else if ((nonzero_bits (XEXP (x, 0), mode)
5075 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5076 return 1;
5077 else
5078 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5079 known_x, known_mode, known_ret);
5080
5081 case UMOD:
5082 /* The result must be <= the second operand. If the second operand
5083 has (or just might have) the high bit set, we know nothing about
5084 the number of sign bit copies. */
5085 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5086 return 1;
5087 else if ((nonzero_bits (XEXP (x, 1), mode)
5088 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5089 return 1;
5090 else
5091 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5092 known_x, known_mode, known_ret);
5093
5094 case DIV:
5095 /* Similar to unsigned division, except that we have to worry about
5096 the case where the divisor is negative, in which case we have
5097 to add 1. */
5098 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5099 known_x, known_mode, known_ret);
5100 if (result > 1
5101 && (bitwidth > HOST_BITS_PER_WIDE_INT
5102 || (nonzero_bits (XEXP (x, 1), mode)
5103 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5104 result--;
5105
5106 return result;
5107
5108 case MOD:
5109 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5110 known_x, known_mode, known_ret);
5111 if (result > 1
5112 && (bitwidth > HOST_BITS_PER_WIDE_INT
5113 || (nonzero_bits (XEXP (x, 1), mode)
5114 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5115 result--;
5116
5117 return result;
5118
5119 case ASHIFTRT:
5120 /* An arithmetic right shift by a constant adds that many copies of
5121 the sign bit. */
5122 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5123 known_x, known_mode, known_ret);
5124 if (CONST_INT_P (XEXP (x, 1))
5125 && INTVAL (XEXP (x, 1)) > 0
5126 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5127 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5128
5129 return num0;
5130
5131 case ASHIFT:
5132 /* Left shifts destroy copies. */
5133 if (!CONST_INT_P (XEXP (x, 1))
5134 || INTVAL (XEXP (x, 1)) < 0
5135 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5136 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5137 return 1;
5138
5139 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5140 known_x, known_mode, known_ret);
5141 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5142
5143 case IF_THEN_ELSE:
5144 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5145 known_x, known_mode, known_ret);
5146 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5147 known_x, known_mode, known_ret);
5148 return MIN (num0, num1);
5149
5150 case EQ: case NE: case GE: case GT: case LE: case LT:
5151 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5152 case GEU: case GTU: case LEU: case LTU:
5153 case UNORDERED: case ORDERED:
5154 /* If STORE_FLAG_VALUE is negative, take its 1's complement and
5155 remask. Then see how many zero bits we have. */
5156 nonzero = STORE_FLAG_VALUE;
5157 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5158 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5159 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5160
5161 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5162
5163 default:
5164 break;
5165 }
5166
5167 /* If we haven't been able to figure it out by one of the above rules,
5168 see if some of the high-order bits are known to be zero. If so,
5169 count those bits and return one less than that amount. If we can't
5170 safely compute the mask for this mode, always return BITWIDTH. */
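  /* For instance (illustrative): if nonzero_bits returns 0x7fff for a
     32-bit mode, floor_log2 is 14 and we return 32 - 14 - 1 == 17,
     since the top 17 bits are known to be zero.  */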
5171
5172 bitwidth = GET_MODE_PRECISION (mode);
5173 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5174 return 1;
5175
5176 nonzero = nonzero_bits (x, mode);
5177 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5178 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5179 }
5180
5181 /* Calculate the rtx_cost of a single instruction. A return value of
5182 zero indicates an instruction pattern without a known cost. */
5183
5184 int
5185 insn_rtx_cost (rtx pat, bool speed)
5186 {
5187 int i, cost;
5188 rtx set;
5189
5190 /* Extract the single set rtx from the instruction pattern.
5191 We can't use single_set since we only have the pattern. */
5192 if (GET_CODE (pat) == SET)
5193 set = pat;
5194 else if (GET_CODE (pat) == PARALLEL)
5195 {
5196 set = NULL_RTX;
5197 for (i = 0; i < XVECLEN (pat, 0); i++)
5198 {
5199 rtx x = XVECEXP (pat, 0, i);
5200 if (GET_CODE (x) == SET)
5201 {
5202 if (set)
5203 return 0;
5204 set = x;
5205 }
5206 }
5207 if (!set)
5208 return 0;
5209 }
5210 else
5211 return 0;
5212
5213 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5214 return cost > 0 ? cost : COSTS_N_INSNS (1);
5215 }
5216
5217 /* Return an estimate of the cost of computing SEQ. */
5218
5219 unsigned
5220 seq_cost (const rtx_insn *seq, bool speed)
5221 {
5222 unsigned cost = 0;
5223 rtx set;
5224
5225 for (; seq; seq = NEXT_INSN (seq))
5226 {
5227 set = single_set (seq);
5228 if (set)
5229 cost += set_rtx_cost (set, speed);
5230 else
5231 cost++;
5232 }
5233
5234 return cost;
5235 }
5236
5237 /* Given an insn INSN and condition COND, return the condition in a
5238 canonical form to simplify testing by callers. Specifically:
5239
5240 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5241 (2) Both operands will be machine operands; (cc0) will have been replaced.
5242 (3) If an operand is a constant, it will be the second operand.
5243 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5244 for GE, GEU, and LEU.
5245
5246 If the condition cannot be understood, or is an inequality floating-point
5247 comparison which needs to be reversed, 0 will be returned.
5248
5249 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5250
5251 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5252 insn used in locating the condition was found. If a replacement test
5253 of the condition is desired, it should be placed in front of that
5254 insn and we will be sure that the inputs are still valid.
5255
5256 If WANT_REG is nonzero, we wish the condition to be relative to that
5257 register, if possible. Therefore, do not canonicalize the condition
5258 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5259 to be a compare to a CC mode register.
5260
5261 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5262 and at INSN. */
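/* For instance (illustrative): by rule (4), (le (reg R) (const_int 4))
   is canonicalized to (lt (reg R) (const_int 5)), provided the
   incremented constant does not overflow the mode of R.  */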
5263
5264 rtx
5265 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5266 rtx_insn **earliest,
5267 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5268 {
5269 enum rtx_code code;
5270 rtx_insn *prev = insn;
5271 const_rtx set;
5272 rtx tem;
5273 rtx op0, op1;
5274 int reverse_code = 0;
5275 machine_mode mode;
5276 basic_block bb = BLOCK_FOR_INSN (insn);
5277
5278 code = GET_CODE (cond);
5279 mode = GET_MODE (cond);
5280 op0 = XEXP (cond, 0);
5281 op1 = XEXP (cond, 1);
5282
5283 if (reverse)
5284 code = reversed_comparison_code (cond, insn);
5285 if (code == UNKNOWN)
5286 return 0;
5287
5288 if (earliest)
5289 *earliest = insn;
5290
5291 /* If we are comparing a register with zero, see if the register is set
5292 in the previous insn to a COMPARE or a comparison operation. Perform
5293 the same tests, as a function of STORE_FLAG_VALUE, that
5294 find_comparison_args in cse.c performs. */
5295
5296 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5297 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5298 && op1 == CONST0_RTX (GET_MODE (op0))
5299 && op0 != want_reg)
5300 {
5301 /* Set nonzero when we find something of interest. */
5302 rtx x = 0;
5303
5304 /* If comparison with cc0, import actual comparison from compare
5305 insn. */
5306 if (op0 == cc0_rtx)
5307 {
5308 if ((prev = prev_nonnote_insn (prev)) == 0
5309 || !NONJUMP_INSN_P (prev)
5310 || (set = single_set (prev)) == 0
5311 || SET_DEST (set) != cc0_rtx)
5312 return 0;
5313
5314 op0 = SET_SRC (set);
5315 op1 = CONST0_RTX (GET_MODE (op0));
5316 if (earliest)
5317 *earliest = prev;
5318 }
5319
5320 /* If this is a COMPARE, pick up the two things being compared. */
5321 if (GET_CODE (op0) == COMPARE)
5322 {
5323 op1 = XEXP (op0, 1);
5324 op0 = XEXP (op0, 0);
5325 continue;
5326 }
5327 else if (!REG_P (op0))
5328 break;
5329
5330 /* Go back to the previous insn. Stop if it is not an INSN. We also
5331 stop if it isn't a single set or if it has a REG_INC note because
5332 we don't want to bother dealing with it. */
5333
5334 prev = prev_nonnote_nondebug_insn (prev);
5335
5336 if (prev == 0
5337 || !NONJUMP_INSN_P (prev)
5338 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5339 /* In cfglayout mode, there do not have to be labels at the
5340 beginning of a block, or jumps at the end, so the previous
5341 conditions would not stop us when we reach the bb boundary. */
5342 || BLOCK_FOR_INSN (prev) != bb)
5343 break;
5344
5345 set = set_of (op0, prev);
5346
5347 if (set
5348 && (GET_CODE (set) != SET
5349 || !rtx_equal_p (SET_DEST (set), op0)))
5350 break;
5351
5352 /* If this is setting OP0, get what it sets it to if it looks
5353 relevant. */
5354 if (set)
5355 {
5356 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5357 #ifdef FLOAT_STORE_FLAG_VALUE
5358 REAL_VALUE_TYPE fsfv;
5359 #endif
5360
5361 /* ??? We may not combine comparisons done in a CCmode with
5362 comparisons not done in a CCmode. This is to aid targets
5363 like Alpha that have an IEEE compliant EQ instruction, and
5364 a non-IEEE compliant BEQ instruction. The use of CCmode is
5365 actually artificial, simply to prevent the combination, but
5366 should not affect other platforms.
5367
5368 However, we must allow VOIDmode comparisons to match either
5369 CCmode or non-CCmode comparison, because some ports have
5370 modeless comparisons inside branch patterns.
5371
5372 ??? This mode check should perhaps look more like the mode check
5373 in simplify_comparison in combine. */
5374 if (((GET_MODE_CLASS (mode) == MODE_CC)
5375 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5376 && mode != VOIDmode
5377 && inner_mode != VOIDmode)
5378 break;
5379 if (GET_CODE (SET_SRC (set)) == COMPARE
5380 || (((code == NE
5381 || (code == LT
5382 && val_signbit_known_set_p (inner_mode,
5383 STORE_FLAG_VALUE))
5384 #ifdef FLOAT_STORE_FLAG_VALUE
5385 || (code == LT
5386 && SCALAR_FLOAT_MODE_P (inner_mode)
5387 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5388 REAL_VALUE_NEGATIVE (fsfv)))
5389 #endif
5390 ))
5391 && COMPARISON_P (SET_SRC (set))))
5392 x = SET_SRC (set);
5393 else if (((code == EQ
5394 || (code == GE
5395 && val_signbit_known_set_p (inner_mode,
5396 STORE_FLAG_VALUE))
5397 #ifdef FLOAT_STORE_FLAG_VALUE
5398 || (code == GE
5399 && SCALAR_FLOAT_MODE_P (inner_mode)
5400 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5401 REAL_VALUE_NEGATIVE (fsfv)))
5402 #endif
5403 ))
5404 && COMPARISON_P (SET_SRC (set)))
5405 {
5406 reverse_code = 1;
5407 x = SET_SRC (set);
5408 }
5409 else if ((code == EQ || code == NE)
5410 && GET_CODE (SET_SRC (set)) == XOR)
5411 /* Handle sequences like:
5412
5413 (set op0 (xor X Y))
5414 ...(eq|ne op0 (const_int 0))...
5415
5416 in which case:
5417
5418 (eq op0 (const_int 0)) reduces to (eq X Y)
5419 (ne op0 (const_int 0)) reduces to (ne X Y)
5420
5421 This is the form used by MIPS16, for example. */
5422 x = SET_SRC (set);
5423 else
5424 break;
5425 }
5426
5427 else if (reg_set_p (op0, prev))
5428 /* If this sets OP0, but not directly, we have to give up. */
5429 break;
5430
5431 if (x)
5432 {
5433 /* If the caller is expecting the condition to be valid at INSN,
5434 make sure X doesn't change before INSN. */
5435 if (valid_at_insn_p)
5436 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5437 break;
5438 if (COMPARISON_P (x))
5439 code = GET_CODE (x);
5440 if (reverse_code)
5441 {
5442 code = reversed_comparison_code (x, prev);
5443 if (code == UNKNOWN)
5444 return 0;
5445 reverse_code = 0;
5446 }
5447
5448 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5449 if (earliest)
5450 *earliest = prev;
5451 }
5452 }
5453
5454 /* If constant is first, put it last. */
5455 if (CONSTANT_P (op0))
5456 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5457
5458 /* If OP0 is the result of a comparison, we weren't able to find what
5459 was really being compared, so fail. */
5460 if (!allow_cc_mode
5461 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5462 return 0;
5463
5464 /* Canonicalize any ordered comparison with integers involving equality
5465 if we can do computations in the relevant mode and we do not
5466 overflow. */
5467
5468 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5469 && CONST_INT_P (op1)
5470 && GET_MODE (op0) != VOIDmode
5471 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5472 {
5473 HOST_WIDE_INT const_val = INTVAL (op1);
5474 unsigned HOST_WIDE_INT uconst_val = const_val;
5475 unsigned HOST_WIDE_INT max_val
5476 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5477
5478 switch (code)
5479 {
5480 case LE:
5481 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5482 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5483 break;
5484
5485 /* When cross-compiling, const_val might be sign-extended from
5486 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5487 case GE:
5488 if ((const_val & max_val)
5489 != (HOST_WIDE_INT_1U
5490 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5491 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5492 break;
5493
5494 case LEU:
5495 if (uconst_val < max_val)
5496 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5497 break;
5498
5499 case GEU:
5500 if (uconst_val != 0)
5501 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5502 break;
5503
5504 default:
5505 break;
5506 }
5507 }
5508
5509 /* Never return CC0; return zero instead. */
5510 if (CC0_P (op0))
5511 return 0;
5512
5513 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5514 }
5515
5516 /* Given a jump insn JUMP, return the condition that will cause it to branch
5517 to its JUMP_LABEL. If the condition cannot be understood, or is an
5518 inequality floating-point comparison which needs to be reversed, 0 will
5519 be returned.
5520
5521 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5522 insn used in locating the condition was found. If a replacement test
5523 of the condition is desired, it should be placed in front of that
5524 insn and we will be sure that the inputs are still valid. If EARLIEST
5525 is null, the returned condition will be valid at INSN.
5526
5527 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5528 compare CC mode register.
5529
5530 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5531
5532 rtx
5533 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5534 int valid_at_insn_p)
5535 {
5536 rtx cond;
5537 int reverse;
5538 rtx set;
5539
5540 /* If this is not a standard conditional jump, we can't parse it. */
5541 if (!JUMP_P (jump)
5542 || ! any_condjump_p (jump))
5543 return 0;
5544 set = pc_set (jump);
5545
5546 cond = XEXP (SET_SRC (set), 0);
5547
5548 /* If this branches to JUMP_LABEL when the condition is false, reverse
5549 the condition. */
5550 reverse
5551 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5552 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5553
5554 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5555 allow_cc_mode, valid_at_insn_p);
5556 }
5557
5558 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5559 TARGET_MODE_REP_EXTENDED.
5560
5561 Note that we assume that the property of
5562 TARGET_MODE_REP_EXTENDED (B, C) carries over to all integral modes
5563 narrower than mode B. I.e., if A is a mode narrower than B, then in
5564 order to be able to operate on it in mode B, mode A needs to
5565 satisfy the requirements set by the representation of mode B. */
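/* E.g. (illustrative, assuming 32-bit SImode and 64-bit DImode): if
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, then
   num_sign_bit_copies_in_rep[DImode][SImode] ends up as 32, the number
   of bits above SImode that must be copies of the sign bit.  */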
5566
5567 static void
5568 init_num_sign_bit_copies_in_rep (void)
5569 {
5570 machine_mode mode, in_mode;
5571
5572 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5573 in_mode = GET_MODE_WIDER_MODE (mode))
5574 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5575 mode = GET_MODE_WIDER_MODE (mode))
5576 {
5577 machine_mode i;
5578
5579 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5580 extends to the next widest mode. */
5581 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5582 || GET_MODE_WIDER_MODE (mode) == in_mode);
5583
5584 /* We are in in_mode. Count how many bits outside of mode
5585 have to be copies of the sign-bit. */
5586 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5587 {
5588 machine_mode wider = GET_MODE_WIDER_MODE (i);
5589
5590 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5591 /* We can only check sign-bit copies starting from the
5592 top-bit. In order to be able to check the bits we
5593 have already seen we pretend that subsequent bits
5594 have to be sign-bit copies too. */
5595 || num_sign_bit_copies_in_rep [in_mode][mode])
5596 num_sign_bit_copies_in_rep [in_mode][mode]
5597 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5598 }
5599 }
5600 }
5601
5602 /* Suppose that truncation from the machine mode of X to MODE is not a
5603 no-op. See if there is anything special about X so that we can
5604 assume it already contains a truncated value of MODE. */
5605
5606 bool
5607 truncated_to_mode (machine_mode mode, const_rtx x)
5608 {
5609 /* This register has already been used in MODE without explicit
5610 truncation. */
5611 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5612 return true;
5613
5614 /* See if we already satisfy the requirements of MODE. If yes we
5615 can just switch to MODE. */
5616 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5617 && (num_sign_bit_copies (x, GET_MODE (x))
5618 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5619 return true;
5620
5621 return false;
5622 }
5623 \f
5624 /* Return true if RTX code CODE has a single sequence of zero or more
5625 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5626 entry in that case. */
5627
5628 static bool
5629 setup_reg_subrtx_bounds (unsigned int code)
5630 {
5631 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5632 unsigned int i = 0;
5633 for (; format[i] != 'e'; ++i)
5634 {
5635 if (!format[i])
5636 /* No subrtxes. Leave start and count as 0. */
5637 return true;
5638 if (format[i] == 'E' || format[i] == 'V')
5639 return false;
5640 }
5641
5642 /* Record the sequence of 'e's. */
5643 rtx_all_subrtx_bounds[code].start = i;
5644 do
5645 ++i;
5646 while (format[i] == 'e');
5647 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5648 /* rtl-iter.h relies on this. */
5649 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5650
5651 for (; format[i]; ++i)
5652 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5653 return false;
5654
5655 return true;
5656 }
5657
5658 /* Initialize rtx_all_subrtx_bounds. */
5659 void
5660 init_rtlanal (void)
5661 {
5662 int i;
5663 for (i = 0; i < NUM_RTX_CODE; i++)
5664 {
5665 if (!setup_reg_subrtx_bounds (i))
5666 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5667 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5668 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5669 }
5670
5671 init_num_sign_bit_copies_in_rep ();
5672 }
5673 \f
5674 /* Check whether this is a constant pool constant. */
5675 bool
5676 constant_pool_constant_p (rtx x)
5677 {
5678 x = avoid_constant_pool_reference (x);
5679 return CONST_DOUBLE_P (x);
5680 }
5681 \f
5682 /* If M is a bitmask that selects a field of low-order bits within an item but
5683 not the entire word, return the length of the field. Return -1 otherwise.
5684 M is used in machine mode MODE. */
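/* E.g. (illustrative): in SImode, m == 0xff gives 8, while m == 0xf0
   gives -1 because it does not select low-order bits.  */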
5685
5686 int
5687 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5688 {
5689 if (mode != VOIDmode)
5690 {
5691 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5692 return -1;
5693 m &= GET_MODE_MASK (mode);
5694 }
5695
5696 return exact_log2 (m + 1);
5697 }
5698
5699 /* Return the mode of MEM's address. */
5700
5701 machine_mode
5702 get_address_mode (rtx mem)
5703 {
5704 machine_mode mode;
5705
5706 gcc_assert (MEM_P (mem));
5707 mode = GET_MODE (XEXP (mem, 0));
5708 if (mode != VOIDmode)
5709 return mode;
5710 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5711 }
5712 \f
5713 /* Split up a CONST_DOUBLE or integer constant rtx
5714 into two rtx's for single words,
5715 storing in *FIRST the word that comes first in memory in the target
5716 and in *SECOND the other.
5717
5718 TODO: This function needs to be rewritten to work on any size
5719 integer. */
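/* E.g. (illustrative): on a 32-bit little-endian target with a 64-bit
   host, splitting (const_int 0x123456789) yields (const_int 0x23456789)
   in *FIRST and (const_int 0x1) in *SECOND.  */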
5720
5721 void
5722 split_double (rtx value, rtx *first, rtx *second)
5723 {
5724 if (CONST_INT_P (value))
5725 {
5726 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5727 {
5728 /* In this case the CONST_INT holds both target words.
5729 Extract the bits from it into two word-sized pieces.
5730 Sign extend each half to HOST_WIDE_INT. */
5731 unsigned HOST_WIDE_INT low, high;
5732 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5733 unsigned bits_per_word = BITS_PER_WORD;
5734
5735 /* Set sign_bit to the most significant bit of a word. */
5736 sign_bit = 1;
5737 sign_bit <<= bits_per_word - 1;
5738
5739 /* Set mask so that all bits of the word are set. We could
5740 have used 1 << BITS_PER_WORD instead of basing the
5741 calculation on sign_bit. However, on machines where
5742 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5743 compiler warning, even though the code would never be
5744 executed. */
5745 mask = sign_bit << 1;
5746 mask--;
5747
5748 /* Set sign_extend as any remaining bits. */
5749 sign_extend = ~mask;
5750
5751 /* Pick the lower word and sign-extend it. */
5752 low = INTVAL (value);
5753 low &= mask;
5754 if (low & sign_bit)
5755 low |= sign_extend;
5756
5757 /* Pick the higher word, shifted to the least significant
5758 bits, and sign-extend it. */
5759 high = INTVAL (value);
5760 high >>= bits_per_word - 1;
5761 high >>= 1;
5762 high &= mask;
5763 if (high & sign_bit)
5764 high |= sign_extend;
5765
5766 /* Store the words in the target machine order. */
5767 if (WORDS_BIG_ENDIAN)
5768 {
5769 *first = GEN_INT (high);
5770 *second = GEN_INT (low);
5771 }
5772 else
5773 {
5774 *first = GEN_INT (low);
5775 *second = GEN_INT (high);
5776 }
5777 }
5778 else
5779 {
5780 /* The rule for using CONST_INT for a wider mode
5781 is that we regard the value as signed.
5782 So sign-extend it. */
5783 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5784 if (WORDS_BIG_ENDIAN)
5785 {
5786 *first = high;
5787 *second = value;
5788 }
5789 else
5790 {
5791 *first = value;
5792 *second = high;
5793 }
5794 }
5795 }
5796 else if (GET_CODE (value) == CONST_WIDE_INT)
5797 {
5798 /* All of this is scary code and needs to be converted to
5799 properly work with any size integer. */
5800 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5801 if (WORDS_BIG_ENDIAN)
5802 {
5803 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5804 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5805 }
5806 else
5807 {
5808 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5809 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5810 }
5811 }
5812 else if (!CONST_DOUBLE_P (value))
5813 {
5814 if (WORDS_BIG_ENDIAN)
5815 {
5816 *first = const0_rtx;
5817 *second = value;
5818 }
5819 else
5820 {
5821 *first = value;
5822 *second = const0_rtx;
5823 }
5824 }
5825 else if (GET_MODE (value) == VOIDmode
5826 /* This is the old way we did CONST_DOUBLE integers. */
5827 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5828 {
5829 /* In an integer, the words are defined as most and least significant.
5830 So order them by the target's convention. */
5831 if (WORDS_BIG_ENDIAN)
5832 {
5833 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5834 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5835 }
5836 else
5837 {
5838 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5839 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5840 }
5841 }
5842 else
5843 {
5844 long l[2];
5845
5846 /* Note, this converts the REAL_VALUE_TYPE to the target's
5847 format, splits up the floating point double and outputs
5848 exactly 32 bits of it into each of l[0] and l[1] --
5849 not necessarily BITS_PER_WORD bits. */
5850 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
5851
5852 /* If 32 bits is an entire word for the target, but not for the host,
5853 then sign-extend on the host so that the number will look the same
5854 way on the host that it would on the target. See for instance
5855 simplify_unary_operation. The #if is needed to avoid compiler
5856 warnings. */
5857
5858 #if HOST_BITS_PER_LONG > 32
5859 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5860 {
5861 if (l[0] & ((long) 1 << 31))
5862 l[0] |= ((unsigned long) (-1) << 32);
5863 if (l[1] & ((long) 1 << 31))
5864 l[1] |= ((unsigned long) (-1) << 32);
5865 }
5866 #endif
5867
5868 *first = GEN_INT (l[0]);
5869 *second = GEN_INT (l[1]);
5870 }
5871 }
5872
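/* A worked example, for illustration, assuming BITS_PER_WORD == 32,
   a 64-bit HOST_WIDE_INT and !WORDS_BIG_ENDIAN (i.e. the
   HOST_BITS_PER_WIDE_INT >= 2 * BITS_PER_WORD path above):

     split_double (GEN_INT (0x500000006), &first, &second);

   yields *first == (const_int 6) and *second == (const_int 5).
   A low word with its sign bit set, e.g. 0x180000000, instead gives
   a *first whose host value is sign-extended to -0x80000000.  */
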
5873 /* Return true if X is a sign_extract or zero_extract from the least
5874 significant bit. */
5875
5876 static bool
5877 lsb_bitfield_op_p (rtx x)
5878 {
5879 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5880 {
5881 machine_mode mode = GET_MODE (XEXP (x, 0));
5882 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5883 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5884
5885 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5886 }
5887 return false;
5888 }
5889
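/* For instance, with !BITS_BIG_ENDIAN,

     (zero_extract:SI (reg:SI R) (const_int 8) (const_int 0))

   satisfies this predicate; with BITS_BIG_ENDIAN the same 8-bit
   least-significant field of an SImode value would need position
   32 - 8 == 24.  (Illustrative only.)  */
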
5890 /* Strip outer address "mutations" from LOC and return a pointer to the
5891 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5892 stripped expression there.
5893
5894 "Mutations" either convert between modes or apply some kind of
5895 extension, truncation or alignment. */
5896
5897 rtx *
5898 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5899 {
5900 for (;;)
5901 {
5902 enum rtx_code code = GET_CODE (*loc);
5903 if (GET_RTX_CLASS (code) == RTX_UNARY)
5904 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5905 used to convert between pointer sizes. */
5906 loc = &XEXP (*loc, 0);
5907 else if (lsb_bitfield_op_p (*loc))
5908 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5909 acts as a combined truncation and extension. */
5910 loc = &XEXP (*loc, 0);
5911 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5912 /* (and ... (const_int -X)) is used to align to X bytes. */
5913 loc = &XEXP (*loc, 0);
5914 else if (code == SUBREG
5915 && !OBJECT_P (SUBREG_REG (*loc))
5916 && subreg_lowpart_p (*loc))
5917 /* A lowpart (subreg (operator ...) ...) is used for mode
5918 conversion too.  */
5919 loc = &SUBREG_REG (*loc);
5920 else
5921 return loc;
5922 if (outer_code)
5923 *outer_code = code;
5924 }
5925 }
5926
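/* For example, given a pointer LOC to

     (and:DI (plus:DI (reg:DI R1) (reg:DI R2)) (const_int -16))

   this returns a pointer to the PLUS, and if OUTER_CODE is nonnull
   it is set to AND, the innermost mutation stripped.  (Illustrative
   only.)  */
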
5927 /* Return true if CODE applies some kind of scale.  The scaled value
5928 is the first operand and the scale is the second.  */
5929
5930 static bool
5931 binary_scale_code_p (enum rtx_code code)
5932 {
5933 return (code == MULT
5934 || code == ASHIFT
5935 /* Needed by ARM targets. */
5936 || code == ASHIFTRT
5937 || code == LSHIFTRT
5938 || code == ROTATE
5939 || code == ROTATERT);
5940 }
5941
5942 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5943 (see address_info). Return null otherwise. */
5944
5945 static rtx *
5946 get_base_term (rtx *inner)
5947 {
5948 if (GET_CODE (*inner) == LO_SUM)
5949 inner = strip_address_mutations (&XEXP (*inner, 0));
5950 if (REG_P (*inner)
5951 || MEM_P (*inner)
5952 || GET_CODE (*inner) == SUBREG
5953 || GET_CODE (*inner) == SCRATCH)
5954 return inner;
5955 return 0;
5956 }
5957
5958 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5959 (see address_info). Return null otherwise. */
5960
5961 static rtx *
5962 get_index_term (rtx *inner)
5963 {
5964 /* At present, only constant scales are allowed. */
5965 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5966 inner = strip_address_mutations (&XEXP (*inner, 0));
5967 if (REG_P (*inner)
5968 || MEM_P (*inner)
5969 || GET_CODE (*inner) == SUBREG
5970 || GET_CODE (*inner) == SCRATCH)
5971 return inner;
5972 return 0;
5973 }
5974
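/* So, for instance, both (mult:DI (reg:DI R) (const_int 4)) and a
   plain (reg:DI R) yield a pointer to the REG, while e.g. a PLUS
   yields null.  (Illustrative only.)  */
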
5975 /* Set the segment part of address INFO to LOC, given that INNER is the
5976 unmutated value. */
5977
5978 static void
5979 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5980 {
5981 gcc_assert (!info->segment);
5982 info->segment = loc;
5983 info->segment_term = inner;
5984 }
5985
5986 /* Set the base part of address INFO to LOC, given that INNER is the
5987 unmutated value. */
5988
5989 static void
5990 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5991 {
5992 gcc_assert (!info->base);
5993 info->base = loc;
5994 info->base_term = inner;
5995 }
5996
5997 /* Set the index part of address INFO to LOC, given that INNER is the
5998 unmutated value. */
5999
6000 static void
6001 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6002 {
6003 gcc_assert (!info->index);
6004 info->index = loc;
6005 info->index_term = inner;
6006 }
6007
6008 /* Set the displacement part of address INFO to LOC, given that INNER
6009 is the constant term. */
6010
6011 static void
6012 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6013 {
6014 gcc_assert (!info->disp);
6015 info->disp = loc;
6016 info->disp_term = inner;
6017 }
6018
6019 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6020 rest of INFO accordingly. */
6021
6022 static void
6023 decompose_incdec_address (struct address_info *info)
6024 {
6025 info->autoinc_p = true;
6026
6027 rtx *base = &XEXP (*info->inner, 0);
6028 set_address_base (info, base, base);
6029 gcc_checking_assert (info->base == info->base_term);
6030
6031 /* These addresses are only valid when the size of the addressed
6032 value is known. */
6033 gcc_checking_assert (info->mode != VOIDmode);
6034 }
6035
6036 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6037 of INFO accordingly. */
6038
6039 static void
6040 decompose_automod_address (struct address_info *info)
6041 {
6042 info->autoinc_p = true;
6043
6044 rtx *base = &XEXP (*info->inner, 0);
6045 set_address_base (info, base, base);
6046 gcc_checking_assert (info->base == info->base_term);
6047
6048 rtx plus = XEXP (*info->inner, 1);
6049 gcc_assert (GET_CODE (plus) == PLUS);
6050
6051 info->base_term2 = &XEXP (plus, 0);
6052 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6053
6054 rtx *step = &XEXP (plus, 1);
6055 rtx *inner_step = strip_address_mutations (step);
6056 if (CONSTANT_P (*inner_step))
6057 set_address_disp (info, step, inner_step);
6058 else
6059 set_address_index (info, step, inner_step);
6060 }
6061
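/* For example,

     (post_modify (reg:SI B) (plus:SI (reg:SI B) (const_int 16)))

   decomposes with (reg:SI B) as both base terms and (const_int 16)
   as the displacement; a register step would become the index
   instead.  (Illustrative only.)  */
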
6062 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6063 values in [PTR, END). Return a pointer to the end of the used array. */
6064
6065 static rtx **
6066 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6067 {
6068 rtx x = *loc;
6069 if (GET_CODE (x) == PLUS)
6070 {
6071 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6072 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6073 }
6074 else
6075 {
6076 gcc_assert (ptr != end);
6077 *ptr++ = loc;
6078 }
6079 return ptr;
6080 }
6081
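/* E.g. (plus (plus (reg R) (symbol_ref S)) (const_int 4)) stores
   pointers to (reg R), (symbol_ref S) and (const_int 4), in that
   left-to-right order.  (Illustrative only.)  */
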
6082 /* Evaluate the likelihood of X being a base or index value, returning
6083 positive if it is likely to be a base, negative if it is likely to be
6084 an index, and 0 if we can't tell. Make the magnitude of the return
6085 value reflect the amount of confidence we have in the answer.
6086
6087 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6088
6089 static int
6090 baseness (rtx x, machine_mode mode, addr_space_t as,
6091 enum rtx_code outer_code, enum rtx_code index_code)
6092 {
6093 /* Believe *_POINTER unless the address shape requires otherwise. */
6094 if (REG_P (x) && REG_POINTER (x))
6095 return 2;
6096 if (MEM_P (x) && MEM_POINTER (x))
6097 return 2;
6098
6099 if (REG_P (x) && HARD_REGISTER_P (x))
6100 {
6101 /* X is a hard register. If it only fits one of the base
6102 or index classes, choose that interpretation. */
6103 int regno = REGNO (x);
6104 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6105 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6106 if (base_p != index_p)
6107 return base_p ? 1 : -1;
6108 }
6109 return 0;
6110 }
6111
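/* For instance, a register marked REG_POINTER scores 2 (confidently
   a base), while a hard register that is valid only as an index
   scores -1.  (Illustrative only.)  */
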
6112 /* INFO->INNER describes a normal, non-automodified address.
6113 Fill in the rest of INFO accordingly. */
6114
6115 static void
6116 decompose_normal_address (struct address_info *info)
6117 {
6118 /* Treat the address as the sum of up to four values. */
6119 rtx *ops[4];
6120 size_t n_ops = extract_plus_operands (info->inner, ops,
6121 ops + ARRAY_SIZE (ops)) - ops;
6122
6123 /* If there is more than one component, any base component is in a PLUS. */
6124 if (n_ops > 1)
6125 info->base_outer_code = PLUS;
6126
6127 /* Try to classify each sum operand now. Leave those that could be
6128 either a base or an index in OPS. */
6129 rtx *inner_ops[4];
6130 size_t out = 0;
6131 for (size_t in = 0; in < n_ops; ++in)
6132 {
6133 rtx *loc = ops[in];
6134 rtx *inner = strip_address_mutations (loc);
6135 if (CONSTANT_P (*inner))
6136 set_address_disp (info, loc, inner);
6137 else if (GET_CODE (*inner) == UNSPEC)
6138 set_address_segment (info, loc, inner);
6139 else
6140 {
6141 /* The only other possibilities are a base or an index. */
6142 rtx *base_term = get_base_term (inner);
6143 rtx *index_term = get_index_term (inner);
6144 gcc_assert (base_term || index_term);
6145 if (!base_term)
6146 set_address_index (info, loc, index_term);
6147 else if (!index_term)
6148 set_address_base (info, loc, base_term);
6149 else
6150 {
6151 gcc_assert (base_term == index_term);
6152 ops[out] = loc;
6153 inner_ops[out] = base_term;
6154 ++out;
6155 }
6156 }
6157 }
6158
6159 /* Classify the remaining OPS members as bases and indexes. */
6160 if (out == 1)
6161 {
6162 /* If we haven't seen a base or an index yet, assume that this is
6163 the base. If we were confident that another term was the base
6164 or index, treat the remaining operand as the other kind. */
6165 if (!info->base)
6166 set_address_base (info, ops[0], inner_ops[0]);
6167 else
6168 set_address_index (info, ops[0], inner_ops[0]);
6169 }
6170 else if (out == 2)
6171 {
6172 /* In the event of a tie, assume the base comes first. */
6173 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6174 GET_CODE (*ops[1]))
6175 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6176 GET_CODE (*ops[0])))
6177 {
6178 set_address_base (info, ops[0], inner_ops[0]);
6179 set_address_index (info, ops[1], inner_ops[1]);
6180 }
6181 else
6182 {
6183 set_address_base (info, ops[1], inner_ops[1]);
6184 set_address_index (info, ops[0], inner_ops[0]);
6185 }
6186 }
6187 else
6188 gcc_assert (out == 0);
6189 }
6190
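/* A sketch of how this plays out, for illustration: in

     (plus (reg:DI B) (plus (mult:DI (reg:DI I) (const_int 4))
                            (const_int 8)))

   the CONST_INT becomes the displacement and the MULT can only be
   an index, so the remaining REG, as the single ambiguous operand
   left over, becomes the base.  */
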
6191 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6192 or VOIDmode if not known. AS is the address space associated with LOC.
6193 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6194
6195 void
6196 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6197 addr_space_t as, enum rtx_code outer_code)
6198 {
6199 memset (info, 0, sizeof (*info));
6200 info->mode = mode;
6201 info->as = as;
6202 info->addr_outer_code = outer_code;
6203 info->outer = loc;
6204 info->inner = strip_address_mutations (loc, &outer_code);
6205 info->base_outer_code = outer_code;
6206 switch (GET_CODE (*info->inner))
6207 {
6208 case PRE_DEC:
6209 case PRE_INC:
6210 case POST_DEC:
6211 case POST_INC:
6212 decompose_incdec_address (info);
6213 break;
6214
6215 case PRE_MODIFY:
6216 case POST_MODIFY:
6217 decompose_automod_address (info);
6218 break;
6219
6220 default:
6221 decompose_normal_address (info);
6222 break;
6223 }
6224 }
6225
6226 /* Describe address operand LOC in INFO. */
6227
6228 void
6229 decompose_lea_address (struct address_info *info, rtx *loc)
6230 {
6231 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6232 }
6233
6234 /* Describe the address of MEM X in INFO. */
6235
6236 void
6237 decompose_mem_address (struct address_info *info, rtx x)
6238 {
6239 gcc_assert (MEM_P (x));
6240 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6241 MEM_ADDR_SPACE (x), MEM);
6242 }
6243
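/* A minimal usage sketch (the helper name is hypothetical, not part
   of the original interface): test whether the address of MEM X is
   auto-incremented or auto-modified.  */

static inline bool
mem_autoinc_address_p_example (rtx x)
{
  struct address_info info;
  decompose_mem_address (&info, x);
  return info.autoinc_p;
}
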
6244 /* Update INFO after a change to the address it describes. */
6245
6246 void
6247 update_address (struct address_info *info)
6248 {
6249 decompose_address (info, info->outer, info->mode, info->as,
6250 info->addr_outer_code);
6251 }
6252
6253 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6254 more complicated than that. */
6255
6256 HOST_WIDE_INT
6257 get_index_scale (const struct address_info *info)
6258 {
6259 rtx index = *info->index;
6260 if (GET_CODE (index) == MULT
6261 && CONST_INT_P (XEXP (index, 1))
6262 && info->index_term == &XEXP (index, 0))
6263 return INTVAL (XEXP (index, 1));
6264
6265 if (GET_CODE (index) == ASHIFT
6266 && CONST_INT_P (XEXP (index, 1))
6267 && info->index_term == &XEXP (index, 0))
6268 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6269
6270 if (info->index == info->index_term)
6271 return 1;
6272
6273 return 0;
6274 }
6275
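/* Some concrete cases (illustrative only):

     (mult (reg I) (const_int 4))    => 4
     (ashift (reg I) (const_int 2))  => 4  (HOST_WIDE_INT_1 << 2)
     (reg I) by itself               => 1

   Anything else, such as a MULT by a non-constant, yields 0.  */
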
6276 /* Return the "index code" of INFO, in the form required by
6277 ok_for_base_p_1. */
6278
6279 enum rtx_code
6280 get_index_code (const struct address_info *info)
6281 {
6282 if (info->index)
6283 return GET_CODE (*info->index);
6284
6285 if (info->disp)
6286 return GET_CODE (*info->disp);
6287
6288 return SCRATCH;
6289 }
6290
6291 /* Return true if RTL X contains a SYMBOL_REF. */
6292
6293 bool
6294 contains_symbol_ref_p (const_rtx x)
6295 {
6296 subrtx_iterator::array_type array;
6297 FOR_EACH_SUBRTX (iter, array, x, ALL)
6298 if (SYMBOL_REF_P (*iter))
6299 return true;
6300
6301 return false;
6302 }
6303
6304 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6305
6306 bool
6307 contains_symbolic_reference_p (const_rtx x)
6308 {
6309 subrtx_iterator::array_type array;
6310 FOR_EACH_SUBRTX (iter, array, x, ALL)
6311 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6312 return true;
6313
6314 return false;
6315 }
6316
6317 /* Return true if X contains a thread-local symbol. */
6318
6319 bool
6320 tls_referenced_p (const_rtx x)
6321 {
6322 if (!targetm.have_tls)
6323 return false;
6324
6325 subrtx_iterator::array_type array;
6326 FOR_EACH_SUBRTX (iter, array, x, ALL)
6327 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6328 return true;
6329 return false;
6330 }