gcc/rtlanal.c
/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "tree.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
\f
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
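
/* Usage sketch (illustrative; hypothetical caller, not code from this
   file): a pass deciding whether a value can be treated as a
   function-wide constant might combine the two predicates above.
   rtx_unstable_p asks whether the value could differ between two
   points of one function; rtx_varies_p asks whether it could differ
   between two executions of the program.

     if (!rtx_unstable_p (x) && !rtx_varies_p (x, false))
       ...  X can be compared reliably against constants  ...
*/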

/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
                         ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
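
/* Usage sketch (illustrative; hoist_invariant_load is a hypothetical
   helper, not part of GCC): a transformation that speculates a memory
   load must first prove the dereference cannot fault.

     if (MEM_P (mem) && !rtx_addr_can_trap_p (XEXP (mem, 0)))
       hoist_invariant_load (mem);

   Stack, frame and virtual-frame addresses pass this test; a weak
   SYMBOL_REF or an arbitrary pointer held in a pseudo does not.  */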

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
\f
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
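
/* Usage sketch (illustrative): recovering the callee of a direct call.
   XEXP (call, 0) is the MEM holding the called address.

     rtx call = get_call_rtx_from (insn);
     if (call)
       {
         rtx addr = XEXP (XEXP (call, 0), 0);
         if (GET_CODE (addr) == SYMBOL_REF)
           ...  XSTR (addr, 0) is the callee's assembler name  ...
       }
*/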
\f
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
\f
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
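
/* Usage sketch (illustrative): split_const never fails, so callers
   need no error path.  Given (const (plus (symbol_ref "s") (const_int 4))),
   it stores (symbol_ref "s") in *BASE_OUT and (const_int 4) in
   *OFFSET_OUT; for any other rtx it stores the rtx itself and
   const0_rtx.

     rtx base, offset;
     split_const (addr, &base, &offset);
     gcc_assert (CONST_INT_P (offset));
*/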
\f
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

\f
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_UNSIGNED_P (op))
    return true;

  return false;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }
  return 0;
}
\f
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;
      break;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;
      break;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
\f
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
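
/* Usage sketch (illustrative): distinguishing how INSN modifies REG.

     const_rtx s = set_of (reg, insn);
     if (s == NULL_RTX)
       ...  REG is not modified by INSN  ...
     else if (GET_CODE (s) == CLOBBER)
       ...  REG is clobbered; its new contents are unspecified  ...
     else
       ...  s is a SET and SET_SRC (s) is the new value  ...
*/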

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    IOR_HARD_REG_SET (*pset, call_used_reg_set);
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *)data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose outputs
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common case
                 only a single set is present in the pattern, so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a SET for the first time, we just expect it
                 to be the single set we are looking for; only when more
                 sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
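
/* Usage sketch (illustrative): most callers reach this through
   single_set, the fast-path wrapper declared in rtl.h, and then take
   the insn apart with SET_DEST and SET_SRC.

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ...  INSN is effectively one assignment to that register,
            possibly alongside USEs, CLOBBERs and dead sets  ...
*/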

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
\f
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
\f

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
\f
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note are always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot
     affect IN.  Here we look at IN; we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
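
/* Usage sketch (count_hard_reg_stores is hypothetical; the in-tree
   analogue is record_hard_reg_sets above): a callback of the shape
   note_stores expects, counting stores to hard registers.

     static void
     count_hard_reg_stores (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
                            void *data)
     {
       if (REG_P (x) && HARD_REGISTER_P (x))
         (*(int *) data)++;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_hard_reg_stores, &n);
*/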
\f
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
\f
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
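
/* Usage sketch (illustrative): testing whether an insn's result is
   known to equal a constant via its REG_EQUAL note.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note && CONSTANT_P (XEXP (note, 0)))
       ...  after INSN, the set destination equals XEXP (note, 0)  ...

   find_constant_src below packages this test together with
   single_set.  */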
1773
1774 /* Return the reg-note of kind KIND in insn INSN which applies to register
1775 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1776 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1777 it might be the case that the note overlaps REGNO. */
1778
1779 rtx
1780 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1781 {
1782 rtx link;
1783
1784 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1785 if (! INSN_P (insn))
1786 return 0;
1787
1788 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1789 if (REG_NOTE_KIND (link) == kind
1790 /* Verify that it is a register, so that scratch and MEM won't cause a
1791 problem here. */
1792 && REG_P (XEXP (link, 0))
1793 && REGNO (XEXP (link, 0)) <= regno
1794 && END_REGNO (XEXP (link, 0)) > regno)
1795 return link;
1796 return 0;
1797 }
1798
1799 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1800 has such a note. */
1801
1802 rtx
1803 find_reg_equal_equiv_note (const_rtx insn)
1804 {
1805 rtx link;
1806
1807 if (!INSN_P (insn))
1808 return 0;
1809
1810 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1811 if (REG_NOTE_KIND (link) == REG_EQUAL
1812 || REG_NOTE_KIND (link) == REG_EQUIV)
1813 {
1814 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1815 insns that have multiple sets. Checking single_set to
1816 make sure of this is not the proper check, as explained
1817 in the comment in set_unique_reg_note.
1818
1819 This should be changed into an assert. */
1820 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1821 return 0;
1822 return link;
1823 }
1824 return NULL;
1825 }
1826
1827 /* Check whether INSN is a single_set whose source is known to be
1828 equivalent to a constant. Return that constant if so, otherwise
1829 return null. */
1830
1831 rtx
1832 find_constant_src (const_rtx insn)
1833 {
1834 rtx note, set, x;
1835
1836 set = single_set (insn);
1837 if (set)
1838 {
1839 x = avoid_constant_pool_reference (SET_SRC (set));
1840 if (CONSTANT_P (x))
1841 return x;
1842 }
1843
1844 note = find_reg_equal_equiv_note (insn);
1845 if (note && CONSTANT_P (XEXP (note, 0)))
1846 return XEXP (note, 0);
1847
1848 return NULL_RTX;
1849 }
1850
1851 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1852 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1853
1854 int
1855 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1856 {
1857 /* If it's not a CALL_INSN, it can't possibly have a
1858 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1859 if (!CALL_P (insn))
1860 return 0;
1861
1862 gcc_assert (datum);
1863
1864 if (!REG_P (datum))
1865 {
1866 rtx link;
1867
1868 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1869 link;
1870 link = XEXP (link, 1))
1871 if (GET_CODE (XEXP (link, 0)) == code
1872 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1873 return 1;
1874 }
1875 else
1876 {
1877 unsigned int regno = REGNO (datum);
1878
1879 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1880 to pseudo registers, so don't bother checking. */
1881
1882 if (regno < FIRST_PSEUDO_REGISTER)
1883 {
1884 unsigned int end_regno = END_HARD_REGNO (datum);
1885 unsigned int i;
1886
1887 for (i = regno; i < end_regno; i++)
1888 if (find_regno_fusage (insn, code, i))
1889 return 1;
1890 }
1891 }
1892
1893 return 0;
1894 }
1895
1896 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1897 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1898
1899 int
1900 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
1901 {
1902 rtx link;
1903
1904 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1905 to pseudo registers, so don't bother checking. */
1906
1907 if (regno >= FIRST_PSEUDO_REGISTER
1908 || !CALL_P (insn) )
1909 return 0;
1910
1911 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1912 {
1913 rtx op, reg;
1914
1915 if (GET_CODE (op = XEXP (link, 0)) == code
1916 && REG_P (reg = XEXP (op, 0))
1917 && REGNO (reg) <= regno
1918 && END_HARD_REGNO (reg) > regno)
1919 return 1;
1920 }
1921
1922 return 0;
1923 }
1924
1925 \f
1926 /* Allocate a register note with kind KIND and datum DATUM. LIST is
1927 stored as the pointer to the next register note. */
1928
1929 rtx
1930 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
1931 {
1932 rtx note;
1933
1934 switch (kind)
1935 {
1936 case REG_CC_SETTER:
1937 case REG_CC_USER:
1938 case REG_LABEL_TARGET:
1939 case REG_LABEL_OPERAND:
1940 case REG_TM:
1941 /* These types of register notes use an INSN_LIST rather than an
1942 EXPR_LIST, so that copying is done right and dumps look
1943 better. */
1944 note = alloc_INSN_LIST (datum, list);
1945 PUT_REG_NOTE_KIND (note, kind);
1946 break;
1947
1948 default:
1949 note = alloc_EXPR_LIST (kind, datum, list);
1950 break;
1951 }
1952
1953 return note;
1954 }
1955
1956 /* Add register note with kind KIND and datum DATUM to INSN. */
1957
1958 void
1959 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
1960 {
1961 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
1962 }
1963
1964 /* Remove register note NOTE from the REG_NOTES of INSN. */
1965
1966 void
1967 remove_note (rtx insn, const_rtx note)
1968 {
1969 rtx link;
1970
1971 if (note == NULL_RTX)
1972 return;
1973
1974 if (REG_NOTES (insn) == note)
1975 REG_NOTES (insn) = XEXP (note, 1);
1976 else
1977 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1978 if (XEXP (link, 1) == note)
1979 {
1980 XEXP (link, 1) = XEXP (note, 1);
1981 break;
1982 }
1983
1984 switch (REG_NOTE_KIND (note))
1985 {
1986 case REG_EQUAL:
1987 case REG_EQUIV:
1988 df_notes_rescan (insn);
1989 break;
1990 default:
1991 break;
1992 }
1993 }
1994
1995 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
1996
1997 void
1998 remove_reg_equal_equiv_notes (rtx insn)
1999 {
2000 rtx *loc;
2001
2002 loc = &REG_NOTES (insn);
2003 while (*loc)
2004 {
2005 enum reg_note kind = REG_NOTE_KIND (*loc);
2006 if (kind == REG_EQUAL || kind == REG_EQUIV)
2007 *loc = XEXP (*loc, 1);
2008 else
2009 loc = &XEXP (*loc, 1);
2010 }
2011 }
2012
2013 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2014
2015 void
2016 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2017 {
2018 df_ref eq_use;
2019
2020 if (!df)
2021 return;
2022
2023 /* This loop is a little tricky. We cannot just go down the chain because
2024 it is being modified by some actions in the loop. So we just iterate
2025 over the head. We plan to drain the list anyway. */
2026 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2027 {
2028 rtx insn = DF_REF_INSN (eq_use);
2029 rtx note = find_reg_equal_equiv_note (insn);
2030
2031 /* This assert is generally triggered when someone deletes a REG_EQUAL
2032 or REG_EQUIV note by hacking the list manually rather than calling
2033 remove_note. */
2034 gcc_assert (note);
2035
2036 remove_note (insn, note);
2037 }
2038 }
2039
2040 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2041 return 1 if it is found. A simple equality test is used to determine if
2042 NODE matches. */
2043
2044 int
2045 in_expr_list_p (const_rtx listp, const_rtx node)
2046 {
2047 const_rtx x;
2048
2049 for (x = listp; x; x = XEXP (x, 1))
2050 if (node == XEXP (x, 0))
2051 return 1;
2052
2053 return 0;
2054 }
2055
2056 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2057 remove that entry from the list if it is found.
2058
2059 A simple equality test is used to determine if NODE matches. */
2060
2061 void
2062 remove_node_from_expr_list (const_rtx node, rtx *listp)
2063 {
2064 rtx temp = *listp;
2065 rtx prev = NULL_RTX;
2066
2067 while (temp)
2068 {
2069 if (node == XEXP (temp, 0))
2070 {
2071 /* Splice the node out of the list. */
2072 if (prev)
2073 XEXP (prev, 1) = XEXP (temp, 1);
2074 else
2075 *listp = XEXP (temp, 1);
2076
2077 return;
2078 }
2079
2080 prev = temp;
2081 temp = XEXP (temp, 1);
2082 }
2083 }
2084 \f
2085 /* Nonzero if X contains any volatile instructions.  These are instructions
2086 which may cause unpredictable machine state, and thus no instructions
2087 or register uses should be moved or combined across them.
2088 This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
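/* For example, an (unspec_volatile ...) or a volatile asm makes this
   return 1, while a volatile MEM by itself does not; volatile_refs_p
   below is the variant that also counts volatile memory references.  */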
2089
2090 int
2091 volatile_insn_p (const_rtx x)
2092 {
2093 const RTX_CODE code = GET_CODE (x);
2094 switch (code)
2095 {
2096 case LABEL_REF:
2097 case SYMBOL_REF:
2098 case CONST:
2099 CASE_CONST_ANY:
2100 case CC0:
2101 case PC:
2102 case REG:
2103 case SCRATCH:
2104 case CLOBBER:
2105 case ADDR_VEC:
2106 case ADDR_DIFF_VEC:
2107 case CALL:
2108 case MEM:
2109 return 0;
2110
2111 case UNSPEC_VOLATILE:
2112 return 1;
2113
2114 case ASM_INPUT:
2115 case ASM_OPERANDS:
2116 if (MEM_VOLATILE_P (x))
2117 return 1;
2118
2119 default:
2120 break;
2121 }
2122
2123 /* Recursively scan the operands of this expression. */
2124
2125 {
2126 const char *const fmt = GET_RTX_FORMAT (code);
2127 int i;
2128
2129 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2130 {
2131 if (fmt[i] == 'e')
2132 {
2133 if (volatile_insn_p (XEXP (x, i)))
2134 return 1;
2135 }
2136 else if (fmt[i] == 'E')
2137 {
2138 int j;
2139 for (j = 0; j < XVECLEN (x, i); j++)
2140 if (volatile_insn_p (XVECEXP (x, i, j)))
2141 return 1;
2142 }
2143 }
2144 }
2145 return 0;
2146 }
2147
2148 /* Nonzero if X contains any volatile memory references,
2149 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
2150
2151 int
2152 volatile_refs_p (const_rtx x)
2153 {
2154 const RTX_CODE code = GET_CODE (x);
2155 switch (code)
2156 {
2157 case LABEL_REF:
2158 case SYMBOL_REF:
2159 case CONST:
2160 CASE_CONST_ANY:
2161 case CC0:
2162 case PC:
2163 case REG:
2164 case SCRATCH:
2165 case CLOBBER:
2166 case ADDR_VEC:
2167 case ADDR_DIFF_VEC:
2168 return 0;
2169
2170 case UNSPEC_VOLATILE:
2171 return 1;
2172
2173 case MEM:
2174 case ASM_INPUT:
2175 case ASM_OPERANDS:
2176 if (MEM_VOLATILE_P (x))
2177 return 1;
2178
2179 default:
2180 break;
2181 }
2182
2183 /* Recursively scan the operands of this expression. */
2184
2185 {
2186 const char *const fmt = GET_RTX_FORMAT (code);
2187 int i;
2188
2189 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2190 {
2191 if (fmt[i] == 'e')
2192 {
2193 if (volatile_refs_p (XEXP (x, i)))
2194 return 1;
2195 }
2196 else if (fmt[i] == 'E')
2197 {
2198 int j;
2199 for (j = 0; j < XVECLEN (x, i); j++)
2200 if (volatile_refs_p (XVECEXP (x, i, j)))
2201 return 1;
2202 }
2203 }
2204 }
2205 return 0;
2206 }
2207
2208 /* Similar to volatile_refs_p, except that it also rejects register pre-
2209 and post-incrementing, calls, and non-VOIDmode CLOBBERs.  */
2210
2211 int
2212 side_effects_p (const_rtx x)
2213 {
2214 const RTX_CODE code = GET_CODE (x);
2215 switch (code)
2216 {
2217 case LABEL_REF:
2218 case SYMBOL_REF:
2219 case CONST:
2220 CASE_CONST_ANY:
2221 case CC0:
2222 case PC:
2223 case REG:
2224 case SCRATCH:
2225 case ADDR_VEC:
2226 case ADDR_DIFF_VEC:
2227 case VAR_LOCATION:
2228 return 0;
2229
2230 case CLOBBER:
2231 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2232 when some combination can't be done. If we see one, don't think
2233 that we can simplify the expression. */
2234 return (GET_MODE (x) != VOIDmode);
2235
2236 case PRE_INC:
2237 case PRE_DEC:
2238 case POST_INC:
2239 case POST_DEC:
2240 case PRE_MODIFY:
2241 case POST_MODIFY:
2242 case CALL:
2243 case UNSPEC_VOLATILE:
2244 return 1;
2245
2246 case MEM:
2247 case ASM_INPUT:
2248 case ASM_OPERANDS:
2249 if (MEM_VOLATILE_P (x))
2250 return 1;
2251
2252 default:
2253 break;
2254 }
2255
2256 /* Recursively scan the operands of this expression. */
2257
2258 {
2259 const char *fmt = GET_RTX_FORMAT (code);
2260 int i;
2261
2262 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2263 {
2264 if (fmt[i] == 'e')
2265 {
2266 if (side_effects_p (XEXP (x, i)))
2267 return 1;
2268 }
2269 else if (fmt[i] == 'E')
2270 {
2271 int j;
2272 for (j = 0; j < XVECLEN (x, i); j++)
2273 if (side_effects_p (XVECEXP (x, i, j)))
2274 return 1;
2275 }
2276 }
2277 }
2278 return 0;
2279 }
2280 \f
2281 /* Return nonzero if evaluating rtx X might cause a trap.
2282 FLAGS controls how to consider MEMs.  A nonzero value means the context
2283 of the access may have changed from the original, such that the
2284 address may have become invalid. */
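/* For instance, a generic (mem:SI (reg:P R)) may trap, and a scalar
   floating-point (div:SF X Y) traps exactly when flag_trapping_math is
   set, whereas REGs and constants never trap.  */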
2285
2286 int
2287 may_trap_p_1 (const_rtx x, unsigned flags)
2288 {
2289 int i;
2290 enum rtx_code code;
2291 const char *fmt;
2292
2293 /* We make no distinction currently, but this function is part of
2294 the internal target-hooks ABI so we keep the parameter as
2295 "unsigned flags". */
2296 bool code_changed = flags != 0;
2297
2298 if (x == 0)
2299 return 0;
2300 code = GET_CODE (x);
2301 switch (code)
2302 {
2303 /* Handle these cases quickly. */
2304 CASE_CONST_ANY:
2305 case SYMBOL_REF:
2306 case LABEL_REF:
2307 case CONST:
2308 case PC:
2309 case CC0:
2310 case REG:
2311 case SCRATCH:
2312 return 0;
2313
2314 case UNSPEC:
2315 return targetm.unspec_may_trap_p (x, flags);
2316
2317 case UNSPEC_VOLATILE:
2318 case ASM_INPUT:
2319 case TRAP_IF:
2320 return 1;
2321
2322 case ASM_OPERANDS:
2323 return MEM_VOLATILE_P (x);
2324
2325 /* Memory ref can trap unless it's a static var or a stack slot. */
2326 case MEM:
2327 /* Recognize specific pattern of stack checking probes. */
2328 if (flag_stack_check
2329 && MEM_VOLATILE_P (x)
2330 && XEXP (x, 0) == stack_pointer_rtx)
2331 return 1;
2332 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2333 reference; moving it out of context such as when moving code
2334 when optimizing, might cause its address to become invalid. */
2335 code_changed
2336 || !MEM_NOTRAP_P (x))
2337 {
2338 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2339 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2340 GET_MODE (x), code_changed);
2341 }
2342
2343 return 0;
2344
2345 /* Division by a non-constant might trap. */
2346 case DIV:
2347 case MOD:
2348 case UDIV:
2349 case UMOD:
2350 if (HONOR_SNANS (GET_MODE (x)))
2351 return 1;
2352 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2353 return flag_trapping_math;
2354 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2355 return 1;
2356 break;
2357
2358 case EXPR_LIST:
2359 /* An EXPR_LIST is used to represent a function call. This
2360 certainly may trap. */
2361 return 1;
2362
2363 case GE:
2364 case GT:
2365 case LE:
2366 case LT:
2367 case LTGT:
2368 case COMPARE:
2369 /* Some floating point comparisons may trap. */
2370 if (!flag_trapping_math)
2371 break;
2372 /* ??? There is no machine-independent way to check for tests that trap
2373 when COMPARE is used, though many targets do make this distinction.
2374 For instance, sparc uses CCFPE for compares which generate exceptions
2375 and CCFP for compares which do not generate exceptions. */
2376 if (HONOR_NANS (GET_MODE (x)))
2377 return 1;
2378 /* But often the compare has some CC mode, so check operand
2379 modes as well. */
2380 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2381 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2382 return 1;
2383 break;
2384
2385 case EQ:
2386 case NE:
2387 if (HONOR_SNANS (GET_MODE (x)))
2388 return 1;
2389 /* Often comparison is CC mode, so check operand modes. */
2390 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2391 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2392 return 1;
2393 break;
2394
2395 case FIX:
2396 /* Conversion of floating point might trap. */
2397 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2398 return 1;
2399 break;
2400
2401 case NEG:
2402 case ABS:
2403 case SUBREG:
2404 /* These operations don't trap even with floating point. */
2405 break;
2406
2407 default:
2408 /* Any floating arithmetic may trap. */
2409 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2410 return 1;
2411 }
2412
2413 fmt = GET_RTX_FORMAT (code);
2414 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2415 {
2416 if (fmt[i] == 'e')
2417 {
2418 if (may_trap_p_1 (XEXP (x, i), flags))
2419 return 1;
2420 }
2421 else if (fmt[i] == 'E')
2422 {
2423 int j;
2424 for (j = 0; j < XVECLEN (x, i); j++)
2425 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2426 return 1;
2427 }
2428 }
2429 return 0;
2430 }
2431
2432 /* Return nonzero if evaluating rtx X might cause a trap. */
2433
2434 int
2435 may_trap_p (const_rtx x)
2436 {
2437 return may_trap_p_1 (x, 0);
2438 }
2439
2440 /* Same as above, but additionally return nonzero if evaluating rtx X might
2441 cause a fault.  We define a fault for the purpose of this function as an
2442 erroneous execution condition that cannot be encountered during the normal
2443 execution of a valid program; the typical example is an unaligned memory
2444 access on a strict alignment machine. The compiler guarantees that it
2445 doesn't generate code that will fault from a valid program, but this
2446 guarantee doesn't mean anything for individual instructions. Consider
2447 the following example:
2448
2449 struct S { int d; union { char *cp; int *ip; }; };
2450
2451 int foo(struct S *s)
2452 {
2453 if (s->d == 1)
2454 return *s->ip;
2455 else
2456 return *s->cp;
2457 }
2458
2459 on a strict alignment machine. In a valid program, foo will never be
2460 invoked on a structure for which d is equal to 1 and the underlying
2461 unique field of the union not aligned on a 4-byte boundary, but the
2462 expression *s->ip might cause a fault if considered individually.
2463
2464 At the RTL level, potentially problematic expressions will almost always
2465 verify may_trap_p; for example, the above dereference can be emitted as
2466 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2467 However, suppose that foo is inlined in a caller that causes s->cp to
2468 point to a local character variable and guarantees that s->d is not set
2469 to 1; foo may have been effectively translated into pseudo-RTL as:
2470
2471 if ((reg:SI) == 1)
2472 (set (reg:SI) (mem:SI (%fp - 7)))
2473 else
2474 (set (reg:QI) (mem:QI (%fp - 7)))
2475
2476 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2477 memory reference to a stack slot, but it will certainly cause a fault
2478 on a strict alignment machine. */
2479
2480 int
2481 may_trap_or_fault_p (const_rtx x)
2482 {
2483 return may_trap_p_1 (x, 1);
2484 }
2485 \f
2486 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2487 i.e., an inequality. */
2488
2489 int
2490 inequality_comparisons_p (const_rtx x)
2491 {
2492 const char *fmt;
2493 int len, i;
2494 const enum rtx_code code = GET_CODE (x);
2495
2496 switch (code)
2497 {
2498 case REG:
2499 case SCRATCH:
2500 case PC:
2501 case CC0:
2502 CASE_CONST_ANY:
2503 case CONST:
2504 case LABEL_REF:
2505 case SYMBOL_REF:
2506 return 0;
2507
2508 case LT:
2509 case LTU:
2510 case GT:
2511 case GTU:
2512 case LE:
2513 case LEU:
2514 case GE:
2515 case GEU:
2516 return 1;
2517
2518 default:
2519 break;
2520 }
2521
2522 len = GET_RTX_LENGTH (code);
2523 fmt = GET_RTX_FORMAT (code);
2524
2525 for (i = 0; i < len; i++)
2526 {
2527 if (fmt[i] == 'e')
2528 {
2529 if (inequality_comparisons_p (XEXP (x, i)))
2530 return 1;
2531 }
2532 else if (fmt[i] == 'E')
2533 {
2534 int j;
2535 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2536 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2537 return 1;
2538 }
2539 }
2540
2541 return 0;
2542 }
2543 \f
2544 /* Replace any occurrence of FROM in X with TO.  The function does
2545 not descend into CONST_DOUBLE expressions when replacing.
2546
2547 Note that copying is not done so X must not be shared unless all copies
2548 are to be modified. */
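/* A typical (hypothetical) use therefore copies first:

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, old_reg, new_reg);

   where old_reg and new_reg are illustrative names; the copy is what
   makes the in-place modification safe.  */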
2549
2550 rtx
2551 replace_rtx (rtx x, rtx from, rtx to)
2552 {
2553 int i, j;
2554 const char *fmt;
2555
2556 if (x == from)
2557 return to;
2558
2559 /* Allow this function to make replacements in EXPR_LISTs. */
2560 if (x == 0)
2561 return 0;
2562
2563 if (GET_CODE (x) == SUBREG)
2564 {
2565 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2566
2567 if (CONST_INT_P (new_rtx))
2568 {
2569 x = simplify_subreg (GET_MODE (x), new_rtx,
2570 GET_MODE (SUBREG_REG (x)),
2571 SUBREG_BYTE (x));
2572 gcc_assert (x);
2573 }
2574 else
2575 SUBREG_REG (x) = new_rtx;
2576
2577 return x;
2578 }
2579 else if (GET_CODE (x) == ZERO_EXTEND)
2580 {
2581 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2582
2583 if (CONST_INT_P (new_rtx))
2584 {
2585 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2586 new_rtx, GET_MODE (XEXP (x, 0)));
2587 gcc_assert (x);
2588 }
2589 else
2590 XEXP (x, 0) = new_rtx;
2591
2592 return x;
2593 }
2594
2595 fmt = GET_RTX_FORMAT (GET_CODE (x));
2596 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2597 {
2598 if (fmt[i] == 'e')
2599 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2600 else if (fmt[i] == 'E')
2601 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2602 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2603 }
2604
2605 return x;
2606 }
2607 \f
2608 /* Replace occurrences of the old label in *X with the new one.
2609 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2610
2611 int
2612 replace_label (rtx *x, void *data)
2613 {
2614 rtx l = *x;
2615 rtx old_label = ((replace_label_data *) data)->r1;
2616 rtx new_label = ((replace_label_data *) data)->r2;
2617 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2618
2619 if (l == NULL_RTX)
2620 return 0;
2621
2622 if (GET_CODE (l) == SYMBOL_REF
2623 && CONSTANT_POOL_ADDRESS_P (l))
2624 {
2625 rtx c = get_pool_constant (l);
2626 if (rtx_referenced_p (old_label, c))
2627 {
2628 rtx new_c, new_l;
2629 replace_label_data *d = (replace_label_data *) data;
2630
2631 /* Create a copy of constant C; replace the label inside
2632 but do not update LABEL_NUSES because uses in the constant pool
2633 are not counted. */
2634 new_c = copy_rtx (c);
2635 d->update_label_nuses = false;
2636 for_each_rtx (&new_c, replace_label, data);
2637 d->update_label_nuses = update_label_nuses;
2638
2639 /* Add the new constant NEW_C to the constant pool and replace
2640 the old reference to the constant with the new one.  */
2641 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2642 *x = replace_rtx (l, l, new_l);
2643 }
2644 return 0;
2645 }
2646
2647 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2648 field. This is not handled by for_each_rtx because it doesn't
2649 handle unprinted ('0') fields. */
2650 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2651 JUMP_LABEL (l) = new_label;
2652
2653 if ((GET_CODE (l) == LABEL_REF
2654 || GET_CODE (l) == INSN_LIST)
2655 && XEXP (l, 0) == old_label)
2656 {
2657 XEXP (l, 0) = new_label;
2658 if (update_label_nuses)
2659 {
2660 ++LABEL_NUSES (new_label);
2661 --LABEL_NUSES (old_label);
2662 }
2663 return 0;
2664 }
2665
2666 return 0;
2667 }
2668
2669 /* When *BODY is equal to X or X is directly referenced by *BODY,
2670 return nonzero, so that FOR_EACH_RTX stops traversing and returns
2671 nonzero too; otherwise FOR_EACH_RTX continues traversing *BODY.  */
2672
2673 static int
2674 rtx_referenced_p_1 (rtx *body, void *x)
2675 {
2676 rtx y = (rtx) x;
2677
2678 if (*body == NULL_RTX)
2679 return y == NULL_RTX;
2680
2681 /* Return true if a label_ref *BODY refers to label Y. */
2682 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2683 return XEXP (*body, 0) == y;
2684
2685 /* If *BODY is a reference to a pool constant, traverse the constant.  */
2686 if (GET_CODE (*body) == SYMBOL_REF
2687 && CONSTANT_POOL_ADDRESS_P (*body))
2688 return rtx_referenced_p (y, get_pool_constant (*body));
2689
2690 /* By default, compare the RTL expressions. */
2691 return rtx_equal_p (*body, y);
2692 }
2693
2694 /* Return true if X is referenced in BODY. */
2695
2696 int
2697 rtx_referenced_p (rtx x, rtx body)
2698 {
2699 return for_each_rtx (&body, rtx_referenced_p_1, x);
2700 }
2701
2702 /* If INSN is a tablejump, return true and store the label (before the jump
2703 table) in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL.  */
2704
2705 bool
2706 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2707 {
2708 rtx label, table;
2709
2710 if (!JUMP_P (insn))
2711 return false;
2712
2713 label = JUMP_LABEL (insn);
2714 if (label != NULL_RTX && !ANY_RETURN_P (label)
2715 && (table = next_active_insn (label)) != NULL_RTX
2716 && JUMP_TABLE_DATA_P (table))
2717 {
2718 gcc_assert (table == NEXT_INSN (label));
2719 if (labelp)
2720 *labelp = label;
2721 if (tablep)
2722 *tablep = table;
2723 return true;
2724 }
2725 return false;
2726 }
2727
2728 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2729 constant that is not in the constant pool and not in the condition
2730 of an IF_THEN_ELSE. */
2731
2732 static int
2733 computed_jump_p_1 (const_rtx x)
2734 {
2735 const enum rtx_code code = GET_CODE (x);
2736 int i, j;
2737 const char *fmt;
2738
2739 switch (code)
2740 {
2741 case LABEL_REF:
2742 case PC:
2743 return 0;
2744
2745 case CONST:
2746 CASE_CONST_ANY:
2747 case SYMBOL_REF:
2748 case REG:
2749 return 1;
2750
2751 case MEM:
2752 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2753 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2754
2755 case IF_THEN_ELSE:
2756 return (computed_jump_p_1 (XEXP (x, 1))
2757 || computed_jump_p_1 (XEXP (x, 2)));
2758
2759 default:
2760 break;
2761 }
2762
2763 fmt = GET_RTX_FORMAT (code);
2764 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2765 {
2766 if (fmt[i] == 'e'
2767 && computed_jump_p_1 (XEXP (x, i)))
2768 return 1;
2769
2770 else if (fmt[i] == 'E')
2771 for (j = 0; j < XVECLEN (x, i); j++)
2772 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2773 return 1;
2774 }
2775
2776 return 0;
2777 }
2778
2779 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2780
2781 Tablejumps and casesi insns are not considered indirect jumps;
2782 we can recognize them by a (use (label_ref)). */
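/* In other words, a plain (set (pc) (reg:DI R)) is a computed jump,
   while a casesi or tablejump pattern whose PARALLEL carries a
   (use (label_ref L)) is not, even though its target is also decided
   at run time.  */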
2783
2784 int
2785 computed_jump_p (const_rtx insn)
2786 {
2787 int i;
2788 if (JUMP_P (insn))
2789 {
2790 rtx pat = PATTERN (insn);
2791
2792 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2793 if (JUMP_LABEL (insn) != NULL)
2794 return 0;
2795
2796 if (GET_CODE (pat) == PARALLEL)
2797 {
2798 int len = XVECLEN (pat, 0);
2799 int has_use_labelref = 0;
2800
2801 for (i = len - 1; i >= 0; i--)
2802 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2803 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2804 == LABEL_REF))
2805 {
2806 has_use_labelref = 1;
2807 break;
2808 }
2809
2810 if (! has_use_labelref)
2811 for (i = len - 1; i >= 0; i--)
2812 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2813 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2814 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2815 return 1;
2816 }
2817 else if (GET_CODE (pat) == SET
2818 && SET_DEST (pat) == pc_rtx
2819 && computed_jump_p_1 (SET_SRC (pat)))
2820 return 1;
2821 }
2822 return 0;
2823 }
2824
2825 /* Optimized inner loop of for_each_rtx, trying to avoid useless recursive
2826 calls.  Processes the subexpressions of EXP, starting at operand N, and passes them to F.  */
2827 static int
2828 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2829 {
2830 int result, i, j;
2831 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2832 rtx *x;
2833
2834 for (; format[n] != '\0'; n++)
2835 {
2836 switch (format[n])
2837 {
2838 case 'e':
2839 /* Call F on X. */
2840 x = &XEXP (exp, n);
2841 result = (*f) (x, data);
2842 if (result == -1)
2843 /* Do not traverse sub-expressions. */
2844 continue;
2845 else if (result != 0)
2846 /* Stop the traversal. */
2847 return result;
2848
2849 if (*x == NULL_RTX)
2850 /* There are no sub-expressions. */
2851 continue;
2852
2853 i = non_rtx_starting_operands[GET_CODE (*x)];
2854 if (i >= 0)
2855 {
2856 result = for_each_rtx_1 (*x, i, f, data);
2857 if (result != 0)
2858 return result;
2859 }
2860 break;
2861
2862 case 'V':
2863 case 'E':
2864 if (XVEC (exp, n) == 0)
2865 continue;
2866 for (j = 0; j < XVECLEN (exp, n); ++j)
2867 {
2868 /* Call F on X. */
2869 x = &XVECEXP (exp, n, j);
2870 result = (*f) (x, data);
2871 if (result == -1)
2872 /* Do not traverse sub-expressions. */
2873 continue;
2874 else if (result != 0)
2875 /* Stop the traversal. */
2876 return result;
2877
2878 if (*x == NULL_RTX)
2879 /* There are no sub-expressions. */
2880 continue;
2881
2882 i = non_rtx_starting_operands[GET_CODE (*x)];
2883 if (i >= 0)
2884 {
2885 result = for_each_rtx_1 (*x, i, f, data);
2886 if (result != 0)
2887 return result;
2888 }
2889 }
2890 break;
2891
2892 default:
2893 /* Nothing to do. */
2894 break;
2895 }
2896 }
2897
2898 return 0;
2899 }
2900
2901 /* Traverse X via depth-first search, calling F for each
2902 sub-expression (including X itself). F is also passed the DATA.
2903 If F returns -1, do not traverse sub-expressions, but continue
2904 traversing the rest of the tree. If F ever returns any other
2905 nonzero value, stop the traversal, and return the value returned
2906 by F. Otherwise, return 0. This function does not traverse inside
2907 tree structure that contains RTX_EXPRs, or into sub-expressions
2908 whose format code is `0' since it is not known whether or not those
2909 codes are actually RTL.
2910
2911 This routine is very general, and could (should?) be used to
2912 implement many of the other routines in this file. */
2913
2914 int
2915 for_each_rtx (rtx *x, rtx_function f, void *data)
2916 {
2917 int result;
2918 int i;
2919
2920 /* Call F on X. */
2921 result = (*f) (x, data);
2922 if (result == -1)
2923 /* Do not traverse sub-expressions. */
2924 return 0;
2925 else if (result != 0)
2926 /* Stop the traversal. */
2927 return result;
2928
2929 if (*x == NULL_RTX)
2930 /* There are no sub-expressions. */
2931 return 0;
2932
2933 i = non_rtx_starting_operands[GET_CODE (*x)];
2934 if (i < 0)
2935 return 0;
2936
2937 return for_each_rtx_1 (*x, i, f, data);
2938 }
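/* A minimal (hypothetical) callback sketch; count_regs_1 and n_regs are
   illustrative names, not part of this file:

     static int
     count_regs_1 (rtx *x, void *data)
     {
       if (*x != NULL_RTX && REG_P (*x))
	 ++*(int *) data;
       return 0;
     }

     int n_regs = 0;
     for_each_rtx (&PATTERN (insn), count_regs_1, &n_regs);

   Returning 0 keeps the traversal going; -1 would skip the sub-rtxes of
   the current expression, and any other value would stop the walk.  */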
2939
2940 \f
2941
2942 /* Data structure that holds the internal state communicated between
2943 for_each_inc_dec, for_each_inc_dec_find_mem and
2944 for_each_inc_dec_find_inc_dec. */
2945
2946 struct for_each_inc_dec_ops {
2947 /* The function to be called for each autoinc operation found. */
2948 for_each_inc_dec_fn fn;
2949 /* The opaque argument to be passed to it. */
2950 void *arg;
2951 /* The MEM we're visiting, if any. */
2952 rtx mem;
2953 };
2954
2955 static int for_each_inc_dec_find_mem (rtx *r, void *d);
2956
2957 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
2958 operands of the equivalent add insn and pass the result to the
2959 operator specified by *D. */
2960
2961 static int
2962 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
2963 {
2964 rtx x = *r;
2965 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
2966
2967 switch (GET_CODE (x))
2968 {
2969 case PRE_INC:
2970 case POST_INC:
2971 {
2972 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2973 rtx r1 = XEXP (x, 0);
2974 rtx c = gen_int_mode (size, GET_MODE (r1));
2975 return data->fn (data->mem, x, r1, r1, c, data->arg);
2976 }
2977
2978 case PRE_DEC:
2979 case POST_DEC:
2980 {
2981 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2982 rtx r1 = XEXP (x, 0);
2983 rtx c = gen_int_mode (-size, GET_MODE (r1));
2984 return data->fn (data->mem, x, r1, r1, c, data->arg);
2985 }
2986
2987 case PRE_MODIFY:
2988 case POST_MODIFY:
2989 {
2990 rtx r1 = XEXP (x, 0);
2991 rtx add = XEXP (x, 1);
2992 return data->fn (data->mem, x, r1, add, NULL, data->arg);
2993 }
2994
2995 case MEM:
2996 {
2997 rtx save = data->mem;
2998 int ret = for_each_inc_dec_find_mem (r, d);
2999 data->mem = save;
3000 return ret;
3001 }
3002
3003 default:
3004 return 0;
3005 }
3006 }
3007
3008 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3009 address, extract the operands of the equivalent add insn and pass
3010 the result to the operator specified by *D. */
3011
3012 static int
3013 for_each_inc_dec_find_mem (rtx *r, void *d)
3014 {
3015 rtx x = *r;
3016 if (x != NULL_RTX && MEM_P (x))
3017 {
3018 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3019 int result;
3020
3021 data->mem = x;
3022
3023 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3024 data);
3025 if (result)
3026 return result;
3027
3028 return -1;
3029 }
3030 return 0;
3031 }
3032
3033 /* Traverse *X looking for MEMs, and for autoinc operations within
3034 them. For each such autoinc operation found, call FN, passing it
3035 the innermost enclosing MEM, the operation itself, the RTX modified
3036 by the operation, two RTXs (the second may be NULL) that, once
3037 added, represent the value to be held by the modified RTX
3038 afterwards, and ARG. FN is to return -1 to skip looking for other
3039 autoinc operations within the visited operation, 0 to continue the
3040 traversal, or any other value to have it returned to the caller of
3041 for_each_inc_dec. */
3042
3043 int
3044 for_each_inc_dec (rtx *x,
3045 for_each_inc_dec_fn fn,
3046 void *arg)
3047 {
3048 struct for_each_inc_dec_ops data;
3049
3050 data.fn = fn;
3051 data.arg = arg;
3052 data.mem = NULL;
3053
3054 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3055 }
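/* For example, for (mem:SI (post_inc:SI (reg:SI R))) the callback is
   handed the MEM, the POST_INC, R as both the modified rtx and the
   first addend, and (const_int 4) as the second addend, 4 being the
   size of SImode.  */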
3056
3057 \f
3058 /* Searches X for any reference to REGNO, returning the rtx of the
3059 reference found if any. Otherwise, returns NULL_RTX. */
3060
3061 rtx
3062 regno_use_in (unsigned int regno, rtx x)
3063 {
3064 const char *fmt;
3065 int i, j;
3066 rtx tem;
3067
3068 if (REG_P (x) && REGNO (x) == regno)
3069 return x;
3070
3071 fmt = GET_RTX_FORMAT (GET_CODE (x));
3072 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3073 {
3074 if (fmt[i] == 'e')
3075 {
3076 if ((tem = regno_use_in (regno, XEXP (x, i))))
3077 return tem;
3078 }
3079 else if (fmt[i] == 'E')
3080 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3081 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3082 return tem;
3083 }
3084
3085 return NULL_RTX;
3086 }
3087
3088 /* Return a value indicating whether OP, an operand of a commutative
3089 operation, is preferred as the first or second operand. The higher
3090 the value, the stronger the preference for being the first operand.
3091 We use negative values to indicate a preference for the second operand
3092 and positive values for the first operand.  */
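/* Under this scheme a CONST_INT scores -8 while a plain REG scores -2,
   so swap_commutative_operands_p below will reorder
   (plus (const_int 4) (reg)) into the canonical (plus (reg) (const_int 4)).  */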
3093
3094 int
3095 commutative_operand_precedence (rtx op)
3096 {
3097 enum rtx_code code = GET_CODE (op);
3098
3099 /* Constants always become the second operand.  Prefer "nice" constants.  */
3100 if (code == CONST_INT)
3101 return -8;
3102 if (code == CONST_DOUBLE)
3103 return -7;
3104 if (code == CONST_FIXED)
3105 return -7;
3106 op = avoid_constant_pool_reference (op);
3107 code = GET_CODE (op);
3108
3109 switch (GET_RTX_CLASS (code))
3110 {
3111 case RTX_CONST_OBJ:
3112 if (code == CONST_INT)
3113 return -6;
3114 if (code == CONST_DOUBLE)
3115 return -5;
3116 if (code == CONST_FIXED)
3117 return -5;
3118 return -4;
3119
3120 case RTX_EXTRA:
3121 /* SUBREGs of objects should come second. */
3122 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3123 return -3;
3124 return 0;
3125
3126 case RTX_OBJ:
3127 /* Complex expressions should come first, so decrease the priority
3128 of objects.  Prefer pointer objects over non-pointer objects.  */
3129 if ((REG_P (op) && REG_POINTER (op))
3130 || (MEM_P (op) && MEM_POINTER (op)))
3131 return -1;
3132 return -2;
3133
3134 case RTX_COMM_ARITH:
3135 /* Prefer operands that are themselves commutative to be first.
3136 This helps to make things linear. In particular,
3137 (and (and (reg) (reg)) (not (reg))) is canonical. */
3138 return 4;
3139
3140 case RTX_BIN_ARITH:
3141 /* If only one operand is a binary expression, it will be the first
3142 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3143 is canonical, although it will usually be further simplified. */
3144 return 2;
3145
3146 case RTX_UNARY:
3147 /* Then prefer NEG and NOT. */
3148 if (code == NEG || code == NOT)
3149 return 1;
3150
3151 default:
3152 return 0;
3153 }
3154 }
3155
3156 /* Return 1 iff it is necessary to swap operands of commutative operation
3157 in order to canonicalize expression. */
3158
3159 bool
3160 swap_commutative_operands_p (rtx x, rtx y)
3161 {
3162 return (commutative_operand_precedence (x)
3163 < commutative_operand_precedence (y));
3164 }
3165
3166 /* Return 1 if X is an autoincrement side effect and the register is
3167 not the stack pointer. */
3168 int
3169 auto_inc_p (const_rtx x)
3170 {
3171 switch (GET_CODE (x))
3172 {
3173 case PRE_INC:
3174 case POST_INC:
3175 case PRE_DEC:
3176 case POST_DEC:
3177 case PRE_MODIFY:
3178 case POST_MODIFY:
3179 /* There are no REG_INC notes for SP. */
3180 if (XEXP (x, 0) != stack_pointer_rtx)
3181 return 1;
3182 default:
3183 break;
3184 }
3185 return 0;
3186 }
3187
3188 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3189 int
3190 loc_mentioned_in_p (rtx *loc, const_rtx in)
3191 {
3192 enum rtx_code code;
3193 const char *fmt;
3194 int i, j;
3195
3196 if (!in)
3197 return 0;
3198
3199 code = GET_CODE (in);
3200 fmt = GET_RTX_FORMAT (code);
3201 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3202 {
3203 if (fmt[i] == 'e')
3204 {
3205 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3206 return 1;
3207 }
3208 else if (fmt[i] == 'E')
3209 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3210 if (loc == &XVECEXP (in, i, j)
3211 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3212 return 1;
3213 }
3214 return 0;
3215 }
3216
3217 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3218 and SUBREG_BYTE, return the bit offset where the subreg begins
3219 (counting from the least significant bit of the operand). */
3220
3221 unsigned int
3222 subreg_lsb_1 (enum machine_mode outer_mode,
3223 enum machine_mode inner_mode,
3224 unsigned int subreg_byte)
3225 {
3226 unsigned int bitpos;
3227 unsigned int byte;
3228 unsigned int word;
3229
3230 /* A paradoxical subreg begins at bit position 0. */
3231 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3232 return 0;
3233
3234 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3235 /* If the subreg crosses a word boundary ensure that
3236 it also begins and ends on a word boundary. */
3237 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3238 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3239 && (subreg_byte % UNITS_PER_WORD
3240 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3241
3242 if (WORDS_BIG_ENDIAN)
3243 word = (GET_MODE_SIZE (inner_mode)
3244 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3245 else
3246 word = subreg_byte / UNITS_PER_WORD;
3247 bitpos = word * BITS_PER_WORD;
3248
3249 if (BYTES_BIG_ENDIAN)
3250 byte = (GET_MODE_SIZE (inner_mode)
3251 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3252 else
3253 byte = subreg_byte % UNITS_PER_WORD;
3254 bitpos += byte * BITS_PER_UNIT;
3255
3256 return bitpos;
3257 }
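/* A worked example, assuming UNITS_PER_WORD == 4: on a little-endian
   target, (subreg:SI (reg:DI R) 4) selects word 1, byte 0, so it begins
   at bit 32; if both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN are set, the
   same subreg byte selects the low-order word and the result is bit 0.  */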
3258
3259 /* Given a subreg X, return the bit offset where the subreg begins
3260 (counting from the least significant bit of the reg). */
3261
3262 unsigned int
3263 subreg_lsb (const_rtx x)
3264 {
3265 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3266 SUBREG_BYTE (x));
3267 }
3268
3269 /* Fill in information about a subreg of a hard register.
3270 xregno - A regno of an inner hard subreg_reg (or what will become one).
3271 xmode - The mode of xregno.
3272 offset - The byte offset.
3273 ymode - The mode of a top level SUBREG (or what may become one).
3274 info - Pointer to structure to fill in. */
3275 void
3276 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3277 unsigned int offset, enum machine_mode ymode,
3278 struct subreg_info *info)
3279 {
3280 int nregs_xmode, nregs_ymode;
3281 int mode_multiple, nregs_multiple;
3282 int offset_adj, y_offset, y_offset_adj;
3283 int regsize_xmode, regsize_ymode;
3284 bool rknown;
3285
3286 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3287
3288 rknown = false;
3289
3290 /* If there are holes in a non-scalar mode in registers, we expect
3291 that it is made up of its units concatenated together. */
3292 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3293 {
3294 enum machine_mode xmode_unit;
3295
3296 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3297 if (GET_MODE_INNER (xmode) == VOIDmode)
3298 xmode_unit = xmode;
3299 else
3300 xmode_unit = GET_MODE_INNER (xmode);
3301 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3302 gcc_assert (nregs_xmode
3303 == (GET_MODE_NUNITS (xmode)
3304 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3305 gcc_assert (hard_regno_nregs[xregno][xmode]
3306 == (hard_regno_nregs[xregno][xmode_unit]
3307 * GET_MODE_NUNITS (xmode)));
3308
3309 /* You can only ask for a SUBREG of a value with holes in the middle
3310 if you don't cross the holes. (Such a SUBREG should be done by
3311 picking a different register class, or doing it in memory if
3312 necessary.) An example of a value with holes is XCmode on 32-bit
3313 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3314 3 for each part, but in memory it's two 128-bit parts.
3315 Padding is assumed to be at the end (not necessarily the 'high part')
3316 of each unit. */
3317 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3318 < GET_MODE_NUNITS (xmode))
3319 && (offset / GET_MODE_SIZE (xmode_unit)
3320 != ((offset + GET_MODE_SIZE (ymode) - 1)
3321 / GET_MODE_SIZE (xmode_unit))))
3322 {
3323 info->representable_p = false;
3324 rknown = true;
3325 }
3326 }
3327 else
3328 nregs_xmode = hard_regno_nregs[xregno][xmode];
3329
3330 nregs_ymode = hard_regno_nregs[xregno][ymode];
3331
3332 /* Paradoxical subregs are otherwise valid. */
3333 if (!rknown
3334 && offset == 0
3335 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3336 {
3337 info->representable_p = true;
3338 /* If this is a big endian paradoxical subreg, which uses more
3339 actual hard registers than the original register, we must
3340 return a negative offset so that we find the proper highpart
3341 of the register. */
3342 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3343 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3344 info->offset = nregs_xmode - nregs_ymode;
3345 else
3346 info->offset = 0;
3347 info->nregs = nregs_ymode;
3348 return;
3349 }
3350
3351 /* If registers store different numbers of bits in the different
3352 modes, we cannot generally form this subreg. */
3353 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3354 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3355 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3356 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3357 {
3358 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3359 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3360 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3361 {
3362 info->representable_p = false;
3363 info->nregs
3364 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3365 info->offset = offset / regsize_xmode;
3366 return;
3367 }
3368 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3369 {
3370 info->representable_p = false;
3371 info->nregs
3372 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3373 info->offset = offset / regsize_xmode;
3374 return;
3375 }
3376 }
3377
3378 /* Lowpart subregs are otherwise valid. */
3379 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3380 {
3381 info->representable_p = true;
3382 rknown = true;
3383
3384 if (offset == 0 || nregs_xmode == nregs_ymode)
3385 {
3386 info->offset = 0;
3387 info->nregs = nregs_ymode;
3388 return;
3389 }
3390 }
3391
3392 /* This should always pass, otherwise we don't know how to verify
3393 the constraint. These conditions may be relaxed but
3394 subreg_regno_offset would need to be redesigned. */
3395 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3396 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3397
3398 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3399 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3400 {
3401 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3402 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3403 HOST_WIDE_INT off_low = offset & (ysize - 1);
3404 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3405 offset = (xsize - ysize - off_high) | off_low;
3406 }
3407 /* The XMODE value can be seen as a vector of NREGS_XMODE
3408 values.  The subreg must represent a lowpart of a given field.
3409 Compute what field it is. */
3410 offset_adj = offset;
3411 offset_adj -= subreg_lowpart_offset (ymode,
3412 mode_for_size (GET_MODE_BITSIZE (xmode)
3413 / nregs_xmode,
3414 MODE_INT, 0));
3415
3416 /* Size of ymode must not be greater than the size of xmode. */
3417 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3418 gcc_assert (mode_multiple != 0);
3419
3420 y_offset = offset / GET_MODE_SIZE (ymode);
3421 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3422 nregs_multiple = nregs_xmode / nregs_ymode;
3423
3424 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3425 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3426
3427 if (!rknown)
3428 {
3429 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3430 rknown = true;
3431 }
3432 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3433 info->nregs = nregs_ymode;
3434 }
3435
3436 /* This function returns the regno offset of a subreg expression.
3437 xregno - A regno of an inner hard subreg_reg (or what will become one).
3438 xmode - The mode of xregno.
3439 offset - The byte offset.
3440 ymode - The mode of a top level SUBREG (or what may become one).
3441 RETURN - The regno offset which would be used. */
3442 unsigned int
3443 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3444 unsigned int offset, enum machine_mode ymode)
3445 {
3446 struct subreg_info info;
3447 subreg_get_info (xregno, xmode, offset, ymode, &info);
3448 return info.offset;
3449 }
3450
3451 /* This function returns true when the offset is representable via
3452 subreg_offset in the given regno.
3453 xregno - A regno of an inner hard subreg_reg (or what will become one).
3454 xmode - The mode of xregno.
3455 offset - The byte offset.
3456 ymode - The mode of a top level SUBREG (or what may become one).
3457 RETURN - Whether the offset is representable. */
3458 bool
3459 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3460 unsigned int offset, enum machine_mode ymode)
3461 {
3462 struct subreg_info info;
3463 subreg_get_info (xregno, xmode, offset, ymode, &info);
3464 return info.representable_p;
3465 }
3466
3467 /* Return the number of a YMODE register to which
3468
3469 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3470
3471 can be simplified. Return -1 if the subreg can't be simplified.
3472
3473 XREGNO is a hard register number. */
3474
3475 int
3476 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3477 unsigned int offset, enum machine_mode ymode)
3478 {
3479 struct subreg_info info;
3480 unsigned int yregno;
3481
3482 #ifdef CANNOT_CHANGE_MODE_CLASS
3483 /* Give the backend a chance to disallow the mode change. */
3484 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3485 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3486 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3487 /* We can use mode change in LRA for some transformations. */
3488 && ! lra_in_progress)
3489 return -1;
3490 #endif
3491
3492 /* We shouldn't simplify stack-related registers. */
3493 if ((!reload_completed || frame_pointer_needed)
3494 && xregno == FRAME_POINTER_REGNUM)
3495 return -1;
3496
3497 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3498 && xregno == ARG_POINTER_REGNUM)
3499 return -1;
3500
3501 if (xregno == STACK_POINTER_REGNUM
3502 /* We should convert hard stack register in LRA if it is
3503 possible. */
3504 && ! lra_in_progress)
3505 return -1;
3506
3507 /* Try to get the register offset. */
3508 subreg_get_info (xregno, xmode, offset, ymode, &info);
3509 if (!info.representable_p)
3510 return -1;
3511
3512 /* Make sure that the offsetted register value is in range. */
3513 yregno = xregno + info.offset;
3514 if (!HARD_REGISTER_NUM_P (yregno))
3515 return -1;
3516
3517 /* See whether (reg:YMODE YREGNO) is valid.
3518
3519 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3520 This is a kludge to work around how complex FP arguments are passed
3521 on IA-64 and should be fixed. See PR target/49226. */
3522 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3523 && HARD_REGNO_MODE_OK (xregno, xmode))
3524 return -1;
3525
3526 return (int) yregno;
3527 }
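/* For instance, assuming a little-endian 64-bit target where hard register
   0 can hold both DImode and SImode, (subreg:SI (reg:DI 0) 0) simplifies
   to register 0, provided register 0 is not one of the stack-related
   registers rejected above.  */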
3528
3529 /* Return the final regno that a subreg expression refers to. */
3530 unsigned int
3531 subreg_regno (const_rtx x)
3532 {
3533 unsigned int ret;
3534 rtx subreg = SUBREG_REG (x);
3535 int regno = REGNO (subreg);
3536
3537 ret = regno + subreg_regno_offset (regno,
3538 GET_MODE (subreg),
3539 SUBREG_BYTE (x),
3540 GET_MODE (x));
3541 return ret;
3542
3543 }
3544
3545 /* Return the number of registers that a subreg expression refers
3546 to. */
3547 unsigned int
3548 subreg_nregs (const_rtx x)
3549 {
3550 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3551 }
3552
3553 /* Return the number of registers that the subreg expression X, whose
3554 inner register has number REGNO, refers to.  This is a variant of
3555 subreg_nregs above in which the regno can be passed in explicitly.  */
3556
3557 unsigned int
3558 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3559 {
3560 struct subreg_info info;
3561 rtx subreg = SUBREG_REG (x);
3562
3563 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3564 &info);
3565 return info.nregs;
3566 }
3567
3568
3569 struct parms_set_data
3570 {
3571 int nregs;
3572 HARD_REG_SET regs;
3573 };
3574
3575 /* Helper function for noticing stores to parameter registers. */
3576 static void
3577 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3578 {
3579 struct parms_set_data *const d = (struct parms_set_data *) data;
3580 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3581 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3582 {
3583 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3584 d->nregs--;
3585 }
3586 }
3587
3588 /* Look backward for the first parameter to be loaded.
3589 Note that loads of all parameters will not necessarily be
3590 found if CSE has eliminated some of them (e.g., an argument
3591 to the outer function is passed down as a parameter).
3592 Do not skip BOUNDARY. */
3593 rtx
3594 find_first_parameter_load (rtx call_insn, rtx boundary)
3595 {
3596 struct parms_set_data parm;
3597 rtx p, before, first_set;
3598
3599 /* Since different machines initialize their parameter registers
3600 in different orders, assume nothing. Collect the set of all
3601 parameter registers. */
3602 CLEAR_HARD_REG_SET (parm.regs);
3603 parm.nregs = 0;
3604 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3605 if (GET_CODE (XEXP (p, 0)) == USE
3606 && REG_P (XEXP (XEXP (p, 0), 0)))
3607 {
3608 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3609
3610 /* We only care about registers which can hold function
3611 arguments. */
3612 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3613 continue;
3614
3615 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3616 parm.nregs++;
3617 }
3618 before = call_insn;
3619 first_set = call_insn;
3620
3621 /* Search backward for the first set of a register in this set. */
3622 while (parm.nregs && before != boundary)
3623 {
3624 before = PREV_INSN (before);
3625
3626 /* It is possible that some loads got CSEed from one call to
3627 another. Stop in that case. */
3628 if (CALL_P (before))
3629 break;
3630
3631 /* Our caller must either ensure that we will find all sets
3632 (in case the code has not been optimized yet), or guard against
3633 possible labels by setting BOUNDARY to the preceding
3634 CODE_LABEL.  */
3635 if (LABEL_P (before))
3636 {
3637 gcc_assert (before == boundary);
3638 break;
3639 }
3640
3641 if (INSN_P (before))
3642 {
3643 int nregs_old = parm.nregs;
3644 note_stores (PATTERN (before), parms_set, &parm);
3645 /* If we found something that did not set a parameter reg,
3646 we're done. Do not keep going, as that might result
3647 in hoisting an insn before the setting of a pseudo
3648 that is used by the hoisted insn. */
3649 if (nregs_old != parm.nregs)
3650 first_set = before;
3651 else
3652 break;
3653 }
3654 }
3655 return first_set;
3656 }
3657
3658 /* Return true if we should avoid inserting code between INSN and the
3659 preceding call instruction.  */
3660
3661 bool
3662 keep_with_call_p (const_rtx insn)
3663 {
3664 rtx set;
3665
3666 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3667 {
3668 if (REG_P (SET_DEST (set))
3669 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3670 && fixed_regs[REGNO (SET_DEST (set))]
3671 && general_operand (SET_SRC (set), VOIDmode))
3672 return true;
3673 if (REG_P (SET_SRC (set))
3674 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3675 && REG_P (SET_DEST (set))
3676 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3677 return true;
3678 /* There may be a stack pop just after the call and before the store
3679 of the return register. Search for the actual store when deciding
3680 if we can break or not. */
3681 if (SET_DEST (set) == stack_pointer_rtx)
3682 {
3683 /* This CONST_CAST is okay because next_nonnote_insn just
3684 returns its argument and we assign it to a const_rtx
3685 variable. */
3686 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3687 if (i2 && keep_with_call_p (i2))
3688 return true;
3689 }
3690 }
3691 return false;
3692 }
3693
3694 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3695 to non-complex jumps. That is, direct unconditional, conditional,
3696 and tablejumps, but not computed jumps or returns. It also does
3697 not apply to the fallthru case of a conditional jump. */
3698
3699 bool
3700 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3701 {
3702 rtx tmp = JUMP_LABEL (jump_insn);
3703
3704 if (label == tmp)
3705 return true;
3706
3707 if (tablejump_p (jump_insn, NULL, &tmp))
3708 {
3709 rtvec vec = XVEC (PATTERN (tmp),
3710 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3711 int i, veclen = GET_NUM_ELEM (vec);
3712
3713 for (i = 0; i < veclen; ++i)
3714 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3715 return true;
3716 }
3717
3718 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3719 return true;
3720
3721 return false;
3722 }
3723
3724 \f
3725 /* Return an estimate of the cost of computing rtx X.
3726 One use is in cse, to decide which expression to keep in the hash table.
3727 Another is in rtl generation, to pick the cheapest way to multiply.
3728 Other uses like the latter are expected in the future.
3729
3730 X appears as operand OPNO in an expression with code OUTER_CODE.
3731 SPEED specifies whether costs optimized for speed or size should
3732 be returned. */
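/* As a rough sketch of the defaults below, on a target with
   UNITS_PER_WORD == 4 a SImode MULT costs COSTS_N_INSNS (5) while a
   DImode MULT costs four times that, since the size factor enters
   quadratically; targetm.rtx_costs may override either figure.  */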
3733
3734 int
3735 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3736 {
3737 int i, j;
3738 enum rtx_code code;
3739 const char *fmt;
3740 int total;
3741 int factor;
3742
3743 if (x == 0)
3744 return 0;
3745
3746 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3747 many insns, taking N times as long. */
3748 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3749 if (factor == 0)
3750 factor = 1;
3751
3752 /* Compute the default costs of certain things.
3753 Note that targetm.rtx_costs can override the defaults. */
3754
3755 code = GET_CODE (x);
3756 switch (code)
3757 {
3758 case MULT:
3759 /* Multiplication has time-complexity O(N*N), where N is the
3760 number of units (translated from digits) when using
3761 schoolbook long multiplication. */
3762 total = factor * factor * COSTS_N_INSNS (5);
3763 break;
3764 case DIV:
3765 case UDIV:
3766 case MOD:
3767 case UMOD:
3768 /* Similarly, complexity for schoolbook long division. */
3769 total = factor * factor * COSTS_N_INSNS (7);
3770 break;
3771 case USE:
3772 /* Used in combine.c as a marker. */
3773 total = 0;
3774 break;
3775 case SET:
3776 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3777 the mode for the factor. */
3778 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3779 if (factor == 0)
3780 factor = 1;
3781 /* Fall through.  */
3782 default:
3783 total = factor * COSTS_N_INSNS (1);
3784 }
3785
3786 switch (code)
3787 {
3788 case REG:
3789 return 0;
3790
3791 case SUBREG:
3792 total = 0;
3793 /* If we can't tie these modes, make this expensive. The larger
3794 the mode, the more expensive it is. */
3795 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3796 return COSTS_N_INSNS (2 + factor);
3797 break;
3798
3799 default:
3800 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3801 return total;
3802 break;
3803 }
3804
3805 /* Sum the costs of the sub-rtx's, plus the cost of this operation,
3806 which is already in TOTAL.  */
3807
3808 fmt = GET_RTX_FORMAT (code);
3809 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3810 if (fmt[i] == 'e')
3811 total += rtx_cost (XEXP (x, i), code, i, speed);
3812 else if (fmt[i] == 'E')
3813 for (j = 0; j < XVECLEN (x, i); j++)
3814 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3815
3816 return total;
3817 }
3818
3819 /* Fill in the structure C with information about both speed and size rtx
3820 costs for X, which is operand OPNO in an expression with code OUTER. */
3821
3822 void
3823 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3824 struct full_rtx_costs *c)
3825 {
3826 c->speed = rtx_cost (x, outer, opno, true);
3827 c->size = rtx_cost (x, outer, opno, false);
3828 }
3829
3830 \f
3831 /* Return cost of address expression X.
3832 Expect that X is a properly formed address reference.
3833
3834 The SPEED parameter specifies whether costs optimized for speed or for
3835 size should be returned.  */
3836
3837 int
3838 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3839 {
3840 /* We may be asked for the cost of various unusual addresses, such as the
3841 operands of push instructions.  It is not worthwhile to complicate the
3842 target hook with such cases.  */
3843
3844 if (!memory_address_addr_space_p (mode, x, as))
3845 return 1000;
3846
3847 return targetm.address_cost (x, mode, as, speed);
3848 }
3849
3850 /* If the target doesn't override, compute the cost as with arithmetic. */
3851
3852 int
3853 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3854 {
3855 return rtx_cost (x, MEM, 0, speed);
3856 }
3857 \f
3858
3859 unsigned HOST_WIDE_INT
3860 nonzero_bits (const_rtx x, enum machine_mode mode)
3861 {
3862 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3863 }
3864
3865 unsigned int
3866 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3867 {
3868 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3869 }
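/* For instance, nonzero_bits on (and:SI X (const_int 255)) is at most
   255, and num_sign_bit_copies on (sign_extend:SI (reg:QI R)) is at
   least 25; facts like these let later passes remove redundant maskings
   and extensions.  */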
3870
3871 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3872 It avoids exponential behavior in nonzero_bits1 when X has
3873 identical subexpressions on the first or the second level. */
3874
3875 static unsigned HOST_WIDE_INT
3876 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3877 enum machine_mode known_mode,
3878 unsigned HOST_WIDE_INT known_ret)
3879 {
3880 if (x == known_x && mode == known_mode)
3881 return known_ret;
3882
3883 /* Try to find identical subexpressions. If found call
3884 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3885 precomputed value for the subexpression as KNOWN_RET. */
3886
3887 if (ARITHMETIC_P (x))
3888 {
3889 rtx x0 = XEXP (x, 0);
3890 rtx x1 = XEXP (x, 1);
3891
3892 /* Check the first level. */
3893 if (x0 == x1)
3894 return nonzero_bits1 (x, mode, x0, mode,
3895 cached_nonzero_bits (x0, mode, known_x,
3896 known_mode, known_ret));
3897
3898 /* Check the second level. */
3899 if (ARITHMETIC_P (x0)
3900 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3901 return nonzero_bits1 (x, mode, x1, mode,
3902 cached_nonzero_bits (x1, mode, known_x,
3903 known_mode, known_ret));
3904
3905 if (ARITHMETIC_P (x1)
3906 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3907 return nonzero_bits1 (x, mode, x0, mode,
3908 cached_nonzero_bits (x0, mode, known_x,
3909 known_mode, known_ret));
3910 }
3911
3912 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3913 }
3914
3915 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3916 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3917 is less useful. We can't allow both, because that results in exponential
3918 run time recursion. There is a nullstone testcase that triggered
3919 this. This macro avoids accidental uses of num_sign_bit_copies. */
3920 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3921
3922 /* Given an expression, X, compute which bits in X can be nonzero.
3923 We don't care about bits outside of those defined in MODE.
3924
3925 For most X this is simply GET_MODE_MASK (MODE), but if X is
3926 an arithmetic operation, we can do better. */
3927
3928 static unsigned HOST_WIDE_INT
3929 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3930 enum machine_mode known_mode,
3931 unsigned HOST_WIDE_INT known_ret)
3932 {
3933 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3934 unsigned HOST_WIDE_INT inner_nz;
3935 enum rtx_code code;
3936 enum machine_mode inner_mode;
3937 unsigned int mode_width = GET_MODE_PRECISION (mode);
3938
3939 /* For floating-point and vector values, assume all bits are needed. */
3940 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3941 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3942 return nonzero;
3943
3944 /* If X is wider than MODE, use its mode instead. */
3945 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
3946 {
3947 mode = GET_MODE (x);
3948 nonzero = GET_MODE_MASK (mode);
3949 mode_width = GET_MODE_PRECISION (mode);
3950 }
3951
3952 if (mode_width > HOST_BITS_PER_WIDE_INT)
3953 /* Our only callers in this case look for single bit values. So
3954 just return the mode mask. Those tests will then be false. */
3955 return nonzero;
3956
3957 #ifndef WORD_REGISTER_OPERATIONS
3958 /* If MODE is wider than X, but both are a single word for both the host
3959 and target machines, we can compute this from which bits of the
3960 object might be nonzero in its own mode, taking into account the fact
3961 that on many CISC machines, accessing an object in a wider mode
3962 causes the high-order bits to become undefined. So they are
3963 not known to be zero. */
3964
3965 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3966 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
3967 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3968 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
3969 {
3970 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3971 known_x, known_mode, known_ret);
3972 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3973 return nonzero;
3974 }
3975 #endif
3976
3977 code = GET_CODE (x);
3978 switch (code)
3979 {
3980 case REG:
3981 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3982 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3983 all the bits above ptr_mode are known to be zero. */
3984 /* As we do not know which address space the pointer is referring to,
3985 we can do this only if the target does not support different pointer
3986 or address modes depending on the address space. */
3987 if (target_default_pointer_address_modes_p ()
3988 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3989 && REG_POINTER (x))
3990 nonzero &= GET_MODE_MASK (ptr_mode);
3991 #endif
3992
3993 /* Include declared information about alignment of pointers. */
3994 /* ??? We don't properly preserve REG_POINTER changes across
3995 pointer-to-integer casts, so we can't trust it except for
3996 things that we know must be pointers. See execute/960116-1.c. */
3997 if ((x == stack_pointer_rtx
3998 || x == frame_pointer_rtx
3999 || x == arg_pointer_rtx)
4000 && REGNO_POINTER_ALIGN (REGNO (x)))
4001 {
4002 unsigned HOST_WIDE_INT alignment
4003 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4004
4005 #ifdef PUSH_ROUNDING
4006 /* If PUSH_ROUNDING is defined, it is possible for the
4007 stack to be momentarily aligned only to that amount,
4008 so we pick the least alignment. */
4009 if (x == stack_pointer_rtx && PUSH_ARGS)
4010 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4011 alignment);
4012 #endif
4013
4014 nonzero &= ~(alignment - 1);
4015 }
4016
4017 {
4018 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4019 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4020 known_mode, known_ret,
4021 &nonzero_for_hook);
4022
4023 if (new_rtx)
4024 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4025 known_mode, known_ret);
4026
4027 return nonzero_for_hook;
4028 }
4029
4030 case CONST_INT:
4031 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4032 /* If X is negative in MODE, sign-extend the value. */
4033 if (INTVAL (x) > 0
4034 && mode_width < BITS_PER_WORD
4035 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4036 != 0)
4037 return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
4038 #endif
4039
4040 return UINTVAL (x);
4041
4042 case MEM:
4043 #ifdef LOAD_EXTEND_OP
4044 /* On many, if not most, RISC machines, reading a byte from memory
4045 zeros the rest of the register. Noticing that fact saves a lot
4046 of extra zero-extends. */
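/* Illustrative example: on a target where LOAD_EXTEND_OP (QImode)
   is ZERO_EXTEND, a (mem:QI ...) examined in SImode can only have
   its low eight bits set, so NONZERO is masked with 0xff.  */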
4047 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4048 nonzero &= GET_MODE_MASK (GET_MODE (x));
4049 #endif
4050 break;
4051
4052 case EQ: case NE:
4053 case UNEQ: case LTGT:
4054 case GT: case GTU: case UNGT:
4055 case LT: case LTU: case UNLT:
4056 case GE: case GEU: case UNGE:
4057 case LE: case LEU: case UNLE:
4058 case UNORDERED: case ORDERED:
4059 /* If this produces an integer result, we know which bits are set.
4060 Code here used to clear bits outside the mode of X, but that is
4061 now done above. */
4062 /* Mind that MODE is the mode the caller wants to look at this
4063 operation in, and not the actual operation mode. We can wind
4064 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4065 that describes the results of a vector compare. */
4066 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4067 && mode_width <= HOST_BITS_PER_WIDE_INT)
4068 nonzero = STORE_FLAG_VALUE;
4069 break;
4070
4071 case NEG:
4072 #if 0
4073 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4074 and num_sign_bit_copies. */
4075 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4076 == GET_MODE_PRECISION (GET_MODE (x)))
4077 nonzero = 1;
4078 #endif
4079
4080 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4081 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4082 break;
4083
4084 case ABS:
4085 #if 0
4086 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4087 and num_sign_bit_copies. */
4088 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4089 == GET_MODE_PRECISION (GET_MODE (x)))
4090 nonzero = 1;
4091 #endif
4092 break;
4093
4094 case TRUNCATE:
4095 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4096 known_x, known_mode, known_ret)
4097 & GET_MODE_MASK (mode));
4098 break;
4099
4100 case ZERO_EXTEND:
4101 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4102 known_x, known_mode, known_ret);
4103 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4104 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4105 break;
4106
4107 case SIGN_EXTEND:
4108 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4109 Otherwise, show all the bits in the outer mode but not the inner
4110 may be nonzero. */
4111 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4112 known_x, known_mode, known_ret);
4113 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4114 {
4115 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4116 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4117 inner_nz |= (GET_MODE_MASK (mode)
4118 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4119 }
4120
4121 nonzero &= inner_nz;
4122 break;
4123
4124 case AND:
4125 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4126 known_x, known_mode, known_ret)
4127 & cached_nonzero_bits (XEXP (x, 1), mode,
4128 known_x, known_mode, known_ret);
4129 break;
4130
4131 case XOR: case IOR:
4132 case UMIN: case UMAX: case SMIN: case SMAX:
4133 {
4134 unsigned HOST_WIDE_INT nonzero0
4135 = cached_nonzero_bits (XEXP (x, 0), mode,
4136 known_x, known_mode, known_ret);
4137
4138 /* Don't call nonzero_bits a second time if it cannot change
4139 anything. */
4140 if ((nonzero & nonzero0) != nonzero)
4141 nonzero &= nonzero0
4142 | cached_nonzero_bits (XEXP (x, 1), mode,
4143 known_x, known_mode, known_ret);
4144 }
4145 break;
4146
4147 case PLUS: case MINUS:
4148 case MULT:
4149 case DIV: case UDIV:
4150 case MOD: case UMOD:
4151 /* We can apply the rules of arithmetic to compute the number of
4152 high- and low-order zero bits of these operations. We start by
4153 computing the width (position of the highest-order nonzero bit)
4154 and the number of low-order zero bits for each value. */
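/* Worked example: if NZ0 == 0x0c (WIDTH0 == 4, LOW0 == 2) and
   NZ1 == 0x30 (WIDTH1 == 6, LOW1 == 4), then for PLUS we compute
   RESULT_WIDTH == 7 and RESULT_LOW == 2, so NONZERO is masked
   down to 0x7c.  */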
4155 {
4156 unsigned HOST_WIDE_INT nz0
4157 = cached_nonzero_bits (XEXP (x, 0), mode,
4158 known_x, known_mode, known_ret);
4159 unsigned HOST_WIDE_INT nz1
4160 = cached_nonzero_bits (XEXP (x, 1), mode,
4161 known_x, known_mode, known_ret);
4162 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4163 int width0 = floor_log2 (nz0) + 1;
4164 int width1 = floor_log2 (nz1) + 1;
4165 int low0 = floor_log2 (nz0 & -nz0);
4166 int low1 = floor_log2 (nz1 & -nz1);
4167 unsigned HOST_WIDE_INT op0_maybe_minusp
4168 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4169 unsigned HOST_WIDE_INT op1_maybe_minusp
4170 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4171 unsigned int result_width = mode_width;
4172 int result_low = 0;
4173
4174 switch (code)
4175 {
4176 case PLUS:
4177 result_width = MAX (width0, width1) + 1;
4178 result_low = MIN (low0, low1);
4179 break;
4180 case MINUS:
4181 result_low = MIN (low0, low1);
4182 break;
4183 case MULT:
4184 result_width = width0 + width1;
4185 result_low = low0 + low1;
4186 break;
4187 case DIV:
4188 if (width1 == 0)
4189 break;
4190 if (!op0_maybe_minusp && !op1_maybe_minusp)
4191 result_width = width0;
4192 break;
4193 case UDIV:
4194 if (width1 == 0)
4195 break;
4196 result_width = width0;
4197 break;
4198 case MOD:
4199 if (width1 == 0)
4200 break;
4201 if (!op0_maybe_minusp && !op1_maybe_minusp)
4202 result_width = MIN (width0, width1);
4203 result_low = MIN (low0, low1);
4204 break;
4205 case UMOD:
4206 if (width1 == 0)
4207 break;
4208 result_width = MIN (width0, width1);
4209 result_low = MIN (low0, low1);
4210 break;
4211 default:
4212 gcc_unreachable ();
4213 }
4214
4215 if (result_width < mode_width)
4216 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4217
4218 if (result_low > 0)
4219 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4220 }
4221 break;
4222
4223 case ZERO_EXTRACT:
4224 if (CONST_INT_P (XEXP (x, 1))
4225 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4226 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4227 break;
4228
4229 case SUBREG:
4230 /* If this is a SUBREG formed for a promoted variable that has
4231 been zero-extended, we know that at least the high-order bits
4232 are zero, though others might be too. */
4233
4234 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4235 nonzero = GET_MODE_MASK (GET_MODE (x))
4236 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4237 known_x, known_mode, known_ret);
4238
4239 inner_mode = GET_MODE (SUBREG_REG (x));
4240 /* If the inner mode is a single word for both the host and target
4241 machines, we can compute this from which bits of the inner
4242 object might be nonzero. */
4243 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4244 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4245 {
4246 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4247 known_x, known_mode, known_ret);
4248
4249 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4250 /* If this is a typical RISC machine, we only have to worry
4251 about the way loads are extended. */
4252 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4253 ? val_signbit_known_set_p (inner_mode, nonzero)
4254 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4255 || !MEM_P (SUBREG_REG (x)))
4256 #endif
4257 {
4258 /* On many CISC machines, accessing an object in a wider mode
4259 causes the high-order bits to become undefined. So they are
4260 not known to be zero. */
4261 if (GET_MODE_PRECISION (GET_MODE (x))
4262 > GET_MODE_PRECISION (inner_mode))
4263 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4264 & ~GET_MODE_MASK (inner_mode));
4265 }
4266 }
4267 break;
4268
4269 case ASHIFTRT:
4270 case LSHIFTRT:
4271 case ASHIFT:
4272 case ROTATE:
4273 /* The nonzero bits are in two classes: any bits within MODE
4274 that aren't in GET_MODE (x) are always significant. The rest of the
4275 nonzero bits are those that are significant in the operand of
4276 the shift when shifted the appropriate number of bits. This
4277 shows that high-order bits are cleared by the right shift and
4278 low-order bits by left shifts. */
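/* For example, in (lshiftrt:SI X (const_int 24)) the inner part
   is OP_NONZERO >> 24, so at most the low eight bits of the result
   can be nonzero.  */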
4279 if (CONST_INT_P (XEXP (x, 1))
4280 && INTVAL (XEXP (x, 1)) >= 0
4281 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4282 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4283 {
4284 enum machine_mode inner_mode = GET_MODE (x);
4285 unsigned int width = GET_MODE_PRECISION (inner_mode);
4286 int count = INTVAL (XEXP (x, 1));
4287 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4288 unsigned HOST_WIDE_INT op_nonzero
4289 = cached_nonzero_bits (XEXP (x, 0), mode,
4290 known_x, known_mode, known_ret);
4291 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4292 unsigned HOST_WIDE_INT outer = 0;
4293
4294 if (mode_width > width)
4295 outer = (op_nonzero & nonzero & ~mode_mask);
4296
4297 if (code == LSHIFTRT)
4298 inner >>= count;
4299 else if (code == ASHIFTRT)
4300 {
4301 inner >>= count;
4302
4303 /* If the sign bit may have been nonzero before the shift, we
4304 need to mark all the places it could have been copied to
4305 by the shift as possibly nonzero. */
4306 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4307 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4308 << (width - count);
4309 }
4310 else if (code == ASHIFT)
4311 inner <<= count;
4312 else
4313 inner = ((inner << (count % width)
4314 | (inner >> (width - (count % width)))) & mode_mask);
4315
4316 nonzero &= (outer | inner);
4317 }
4318 break;
4319
4320 case FFS:
4321 case POPCOUNT:
4322 /* This is at most the number of bits in the mode. */
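/* For SImode, MODE_WIDTH is 32 and this evaluates to
   (2 << 5) - 1 == 0x3f, which covers every result up to 32.  */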
4323 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4324 break;
4325
4326 case CLZ:
4327 /* If CLZ has a known value at zero, then the nonzero bits are
4328 that value, plus the number of bits in the mode minus one. */
4329 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4330 nonzero
4331 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4332 else
4333 nonzero = -1;
4334 break;
4335
4336 case CTZ:
4337 /* If CTZ has a known value at zero, then the nonzero bits are
4338 that value, plus the number of bits in the mode minus one. */
4339 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4340 nonzero
4341 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4342 else
4343 nonzero = -1;
4344 break;
4345
4346 case CLRSB:
4347 /* This is at most the number of bits in the mode minus 1. */
4348 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4349 break;
4350
4351 case PARITY:
4352 nonzero = 1;
4353 break;
4354
4355 case IF_THEN_ELSE:
4356 {
4357 unsigned HOST_WIDE_INT nonzero_true
4358 = cached_nonzero_bits (XEXP (x, 1), mode,
4359 known_x, known_mode, known_ret);
4360
4361 /* Don't call nonzero_bits a second time if it cannot change
4362 anything. */
4363 if ((nonzero & nonzero_true) != nonzero)
4364 nonzero &= nonzero_true
4365 | cached_nonzero_bits (XEXP (x, 2), mode,
4366 known_x, known_mode, known_ret);
4367 }
4368 break;
4369
4370 default:
4371 break;
4372 }
4373
4374 return nonzero;
4375 }
4376
4377 /* See the macro definition above. */
4378 #undef cached_num_sign_bit_copies
4379
4380 \f
4381 /* The function cached_num_sign_bit_copies is a wrapper around
4382 num_sign_bit_copies1. It avoids exponential behavior in
4383 num_sign_bit_copies1 when X has identical subexpressions on the
4384 first or the second level. */
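/* For instance, in (smax:SI R R) where both operands are the same
   shared rtx R, the value for R is computed once and passed down
   as KNOWN_X/KNOWN_RET instead of being recomputed along both
   paths.  */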
4385
4386 static unsigned int
4387 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4388 enum machine_mode known_mode,
4389 unsigned int known_ret)
4390 {
4391 if (x == known_x && mode == known_mode)
4392 return known_ret;
4393
4394 /* Try to find identical subexpressions. If found call
4395 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4396 the precomputed value for the subexpression as KNOWN_RET. */
4397
4398 if (ARITHMETIC_P (x))
4399 {
4400 rtx x0 = XEXP (x, 0);
4401 rtx x1 = XEXP (x, 1);
4402
4403 /* Check the first level. */
4404 if (x0 == x1)
4405 return
4406 num_sign_bit_copies1 (x, mode, x0, mode,
4407 cached_num_sign_bit_copies (x0, mode, known_x,
4408 known_mode,
4409 known_ret));
4410
4411 /* Check the second level. */
4412 if (ARITHMETIC_P (x0)
4413 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4414 return
4415 num_sign_bit_copies1 (x, mode, x1, mode,
4416 cached_num_sign_bit_copies (x1, mode, known_x,
4417 known_mode,
4418 known_ret));
4419
4420 if (ARITHMETIC_P (x1)
4421 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4422 return
4423 num_sign_bit_copies1 (x, mode, x0, mode,
4424 cached_num_sign_bit_copies (x0, mode, known_x,
4425 known_mode,
4426 known_ret));
4427 }
4428
4429 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4430 }
4431
4432 /* Return the number of bits at the high-order end of X that are known to
4433 be equal to the sign bit. X will be used in mode MODE; if MODE is
4434 VOIDmode, X will be used in its own mode. The returned value will always
4435 be between 1 and the number of bits in MODE. */
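/* For example, (const_int 0xffff) used in SImode has
   32 - floor_log2 (0xffff) - 1 == 16 sign bit copies, while
   (const_int -1) has all 32.  */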
4436
4437 static unsigned int
4438 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4439 enum machine_mode known_mode,
4440 unsigned int known_ret)
4441 {
4442 enum rtx_code code = GET_CODE (x);
4443 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4444 int num0, num1, result;
4445 unsigned HOST_WIDE_INT nonzero;
4446
4447 /* If we weren't given a mode, use the mode of X. If the mode is still
4448 VOIDmode, we don't know anything. Likewise if one of the modes is
4449 floating-point. */
4450
4451 if (mode == VOIDmode)
4452 mode = GET_MODE (x);
4453
4454 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4455 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4456 return 1;
4457
4458 /* For a smaller object, just ignore the high bits. */
4459 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4460 {
4461 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4462 known_x, known_mode, known_ret);
4463 return MAX (1,
4464 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4465 }
4466
4467 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4468 {
4469 #ifndef WORD_REGISTER_OPERATIONS
4470 /* If this machine does not do all register operations on the entire
4471 register and MODE is wider than the mode of X, we can say nothing
4472 at all about the high-order bits. */
4473 return 1;
4474 #else
4475 /* Likewise on machines that do, if the mode of the object is smaller
4476 than a word and loads of that size don't sign extend, we can say
4477 nothing about the high order bits. */
4478 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4479 #ifdef LOAD_EXTEND_OP
4480 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4481 #endif
4482 )
4483 return 1;
4484 #endif
4485 }
4486
4487 switch (code)
4488 {
4489 case REG:
4490
4491 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4492 /* If pointers extend signed and this is a pointer in Pmode, say that
4493 all the bits above ptr_mode are known to be sign bit copies. */
4494 /* As we do not know which address space the pointer is referring to,
4495 we can do this only if the target does not support different pointer
4496 or address modes depending on the address space. */
4497 if (target_default_pointer_address_modes_p ()
4498 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4499 && mode == Pmode && REG_POINTER (x))
4500 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4501 #endif
4502
4503 {
4504 unsigned int copies_for_hook = 1, copies = 1;
4505 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4506 known_mode, known_ret,
4507 &copies_for_hook);
4508
4509 if (new_rtx)
4510 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4511 known_mode, known_ret);
4512
4513 if (copies > 1 || copies_for_hook > 1)
4514 return MAX (copies, copies_for_hook);
4515
4516 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4517 }
4518 break;
4519
4520 case MEM:
4521 #ifdef LOAD_EXTEND_OP
4522 /* Some RISC machines sign-extend all loads of smaller than a word. */
4523 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4524 return MAX (1, ((int) bitwidth
4525 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4526 #endif
4527 break;
4528
4529 case CONST_INT:
4530 /* If the constant is negative, take its 1's complement and remask.
4531 Then see how many zero bits we have. */
4532 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4533 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4534 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4535 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4536
4537 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4538
4539 case SUBREG:
4540 /* If this is a SUBREG for a promoted object that is sign-extended
4541 and we are looking at it in a wider mode, we know that at least the
4542 high-order bits are known to be sign bit copies. */
4543
4544 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4545 {
4546 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4547 known_x, known_mode, known_ret);
4548 return MAX ((int) bitwidth
4549 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4550 num0);
4551 }
4552
4553 /* For a smaller object, just ignore the high bits. */
4554 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4555 {
4556 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4557 known_x, known_mode, known_ret);
4558 return MAX (1, (num0
4559 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4560 - bitwidth)));
4561 }
4562
4563 #ifdef WORD_REGISTER_OPERATIONS
4564 #ifdef LOAD_EXTEND_OP
4565 /* For paradoxical SUBREGs on machines where all register operations
4566 affect the entire register, just look inside. Note that we are
4567 passing MODE to the recursive call, so the number of sign bit copies
4568 will remain relative to that mode, not the inner mode. */
4569
4570 /* This works only if loads sign extend. Otherwise, if we get a
4571 reload for the inner part, it may be loaded from the stack, and
4572 then we lose all sign bit copies that existed before the store
4573 to the stack. */
4574
4575 if (paradoxical_subreg_p (x)
4576 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4577 && MEM_P (SUBREG_REG (x)))
4578 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4579 known_x, known_mode, known_ret);
4580 #endif
4581 #endif
4582 break;
4583
4584 case SIGN_EXTRACT:
4585 if (CONST_INT_P (XEXP (x, 1)))
4586 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4587 break;
4588
4589 case SIGN_EXTEND:
4590 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4591 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4592 known_x, known_mode, known_ret));
4593
4594 case TRUNCATE:
4595 /* For a smaller object, just ignore the high bits. */
4596 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4597 known_x, known_mode, known_ret);
4598 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4599 - bitwidth)));
4600
4601 case NOT:
4602 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4603 known_x, known_mode, known_ret);
4604
4605 case ROTATE: case ROTATERT:
4606 /* If we are rotating left by fewer bits than the number of sign
4607 bit copies, we can just subtract that amount from the number.
4608 A right rotate by C is treated as a left rotate by BITWIDTH - C. */
4609 if (CONST_INT_P (XEXP (x, 1))
4610 && INTVAL (XEXP (x, 1)) >= 0
4611 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4612 {
4613 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4614 known_x, known_mode, known_ret);
4615 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4616 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4617 }
4618 break;
4619
4620 case NEG:
4621 /* In general, this subtracts one sign bit copy. But if the value
4622 is known to be positive, the number of sign bit copies is the
4623 same as that of the input. Finally, if the input has just one bit
4624 that might be nonzero, all the bits are copies of the sign bit. */
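/* E.g. if the operand is known to be 0 or 1 (NONZERO == 1), its
   negation is 0 or -1, so every bit is a copy of the sign bit and
   we return BITWIDTH.  */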
4625 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4626 known_x, known_mode, known_ret);
4627 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4628 return num0 > 1 ? num0 - 1 : 1;
4629
4630 nonzero = nonzero_bits (XEXP (x, 0), mode);
4631 if (nonzero == 1)
4632 return bitwidth;
4633
4634 if (num0 > 1
4635 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4636 num0--;
4637
4638 return num0;
4639
4640 case IOR: case AND: case XOR:
4641 case SMIN: case SMAX: case UMIN: case UMAX:
4642 /* Logical operations will preserve the number of sign-bit copies.
4643 MIN and MAX operations always return one of the operands. */
4644 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4645 known_x, known_mode, known_ret);
4646 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4647 known_x, known_mode, known_ret);
4648
4649 /* If num1 is clearing some of the top bits then regardless of
4650 the other term, we are guaranteed to have at least that many
4651 high-order zero bits. */
4652 if (code == AND
4653 && num1 > 1
4654 && bitwidth <= HOST_BITS_PER_WIDE_INT
4655 && CONST_INT_P (XEXP (x, 1))
4656 && (UINTVAL (XEXP (x, 1))
4657 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4658 return num1;
4659
4660 /* Similarly for IOR when setting high-order bits. */
4661 if (code == IOR
4662 && num1 > 1
4663 && bitwidth <= HOST_BITS_PER_WIDE_INT
4664 && CONST_INT_P (XEXP (x, 1))
4665 && (UINTVAL (XEXP (x, 1))
4666 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4667 return num1;
4668
4669 return MIN (num0, num1);
4670
4671 case PLUS: case MINUS:
4672 /* For addition and subtraction, we can have a 1-bit carry. However,
4673 if we are subtracting 1 from a positive number, there will not
4674 be such a carry. Furthermore, if the positive number is known to
4675 be 0 or 1, we know the result is either -1 or 0. */
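/* E.g. for (plus:SI X (const_int -1)) with X known to be 0 or 1,
   the result is -1 or 0, so all BITWIDTH bits are sign bit
   copies.  */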
4676
4677 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4678 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4679 {
4680 nonzero = nonzero_bits (XEXP (x, 0), mode);
4681 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4682 return (nonzero == 1 || nonzero == 0 ? bitwidth
4683 : bitwidth - floor_log2 (nonzero) - 1);
4684 }
4685
4686 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4687 known_x, known_mode, known_ret);
4688 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4689 known_x, known_mode, known_ret);
4690 result = MAX (1, MIN (num0, num1) - 1);
4691
4692 return result;
4693
4694 case MULT:
4695 /* The number of bits of the product is the sum of the number of
4696 bits of both terms. However, unless one of the terms is known
4697 to be positive, we must allow for an additional bit since negating
4698 a negative number can remove one sign bit copy. */
4699
4700 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4701 known_x, known_mode, known_ret);
4702 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4703 known_x, known_mode, known_ret);
4704
4705 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4706 if (result > 0
4707 && (bitwidth > HOST_BITS_PER_WIDE_INT
4708 || (((nonzero_bits (XEXP (x, 0), mode)
4709 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4710 && ((nonzero_bits (XEXP (x, 1), mode)
4711 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4712 != 0))))
4713 result--;
4714
4715 return MAX (1, result);
4716
4717 case UDIV:
4718 /* The result must be <= the first operand. If the first operand
4719 has the high bit set, we know nothing about the number of sign
4720 bit copies. */
4721 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4722 return 1;
4723 else if ((nonzero_bits (XEXP (x, 0), mode)
4724 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4725 return 1;
4726 else
4727 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4728 known_x, known_mode, known_ret);
4729
4730 case UMOD:
4731 /* The result must be <= the second operand. If the second operand
4732 has (or just might have) the high bit set, we know nothing about
4733 the number of sign bit copies. */
4734 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4735 return 1;
4736 else if ((nonzero_bits (XEXP (x, 1), mode)
4737 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4738 return 1;
4739 else
4740 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4741 known_x, known_mode, known_ret);
4742
4743 case DIV:
4744 /* Similar to unsigned division, except that we have to worry about
4745 the case where the divisor is negative, in which case we have
4746 to add 1. */
4747 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4748 known_x, known_mode, known_ret);
4749 if (result > 1
4750 && (bitwidth > HOST_BITS_PER_WIDE_INT
4751 || (nonzero_bits (XEXP (x, 1), mode)
4752 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4753 result--;
4754
4755 return result;
4756
4757 case MOD:
4758 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4759 known_x, known_mode, known_ret);
4760 if (result > 1
4761 && (bitwidth > HOST_BITS_PER_WIDE_INT
4762 || (nonzero_bits (XEXP (x, 1), mode)
4763 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4764 result--;
4765
4766 return result;
4767
4768 case ASHIFTRT:
4769 /* Shifts by a constant add to the number of bits equal to the
4770 sign bit. */
4771 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4772 known_x, known_mode, known_ret);
4773 if (CONST_INT_P (XEXP (x, 1))
4774 && INTVAL (XEXP (x, 1)) > 0
4775 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4776 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4777
4778 return num0;
4779
4780 case ASHIFT:
4781 /* Left shifts destroy copies. */
4782 if (!CONST_INT_P (XEXP (x, 1))
4783 || INTVAL (XEXP (x, 1)) < 0
4784 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4785 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4786 return 1;
4787
4788 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4789 known_x, known_mode, known_ret);
4790 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4791
4792 case IF_THEN_ELSE:
4793 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4794 known_x, known_mode, known_ret);
4795 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4796 known_x, known_mode, known_ret);
4797 return MIN (num0, num1);
4798
4799 case EQ: case NE: case GE: case GT: case LE: case LT:
4800 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4801 case GEU: case GTU: case LEU: case LTU:
4802 case UNORDERED: case ORDERED:
4803 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
4804 Then see how many zero bits we have. */
4805 nonzero = STORE_FLAG_VALUE;
4806 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4807 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4808 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4809
4810 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4811
4812 default:
4813 break;
4814 }
4815
4816 /* If we haven't been able to figure it out by one of the above rules,
4817 see if some of the high-order bits are known to be zero. If so,
4818 count those bits and return one less than that amount. If we can't
4819 safely compute the mask for this mode, always return BITWIDTH. */
4820
4821 bitwidth = GET_MODE_PRECISION (mode);
4822 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4823 return 1;
4824
4825 nonzero = nonzero_bits (x, mode);
4826 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4827 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4828 }
4829
4830 /* Calculate the rtx_cost of a single instruction. A return value of
4831 zero indicates an instruction pattern without a known cost. */
4832
4833 int
4834 insn_rtx_cost (rtx pat, bool speed)
4835 {
4836 int i, cost;
4837 rtx set;
4838
4839 /* Extract the single set rtx from the instruction pattern.
4840 We can't use single_set since we only have the pattern. */
4841 if (GET_CODE (pat) == SET)
4842 set = pat;
4843 else if (GET_CODE (pat) == PARALLEL)
4844 {
4845 set = NULL_RTX;
4846 for (i = 0; i < XVECLEN (pat, 0); i++)
4847 {
4848 rtx x = XVECEXP (pat, 0, i);
4849 if (GET_CODE (x) == SET)
4850 {
4851 if (set)
4852 return 0;
4853 set = x;
4854 }
4855 }
4856 if (!set)
4857 return 0;
4858 }
4859 else
4860 return 0;
4861
4862 cost = set_src_cost (SET_SRC (set), speed);
4863 return cost > 0 ? cost : COSTS_N_INSNS (1);
4864 }
4865
4866 /* Given an insn INSN and condition COND, return the condition in a
4867 canonical form to simplify testing by callers. Specifically:
4868
4869 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4870 (2) Both operands will be machine operands; (cc0) will have been replaced.
4871 (3) If an operand is a constant, it will be the second operand.
4872 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4873 for GE, GEU, and LEU.
4874
4875 If the condition cannot be understood, or is an inequality floating-point
4876 comparison which needs to be reversed, 0 will be returned.
4877
4878 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4879
4880 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4881 insn used in locating the condition was found. If a replacement test
4882 of the condition is desired, it should be placed in front of that
4883 insn and we will be sure that the inputs are still valid.
4884
4885 If WANT_REG is nonzero, we wish the condition to be relative to that
4886 register, if possible. Therefore, do not canonicalize the condition
4887 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4888 to be a compare to a CC mode register.
4889
4890 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4891 and at INSN. */
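/* Illustrative examples (register numbers are arbitrary):
   (le (reg:SI 60) (const_int 4)) canonicalizes to
   (lt (reg:SI 60) (const_int 5)), and
   (gt (const_int 4) (reg:SI 60)) to (lt (reg:SI 60) (const_int 4)).  */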
4892
4893 rtx
4894 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4895 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4896 {
4897 enum rtx_code code;
4898 rtx prev = insn;
4899 const_rtx set;
4900 rtx tem;
4901 rtx op0, op1;
4902 int reverse_code = 0;
4903 enum machine_mode mode;
4904 basic_block bb = BLOCK_FOR_INSN (insn);
4905
4906 code = GET_CODE (cond);
4907 mode = GET_MODE (cond);
4908 op0 = XEXP (cond, 0);
4909 op1 = XEXP (cond, 1);
4910
4911 if (reverse)
4912 code = reversed_comparison_code (cond, insn);
4913 if (code == UNKNOWN)
4914 return 0;
4915
4916 if (earliest)
4917 *earliest = insn;
4918
4919 /* If we are comparing a register with zero, see if the register is set
4920 in the previous insn to a COMPARE or a comparison operation. Perform
4921 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4922 in cse.c */
4923
4924 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4925 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4926 && op1 == CONST0_RTX (GET_MODE (op0))
4927 && op0 != want_reg)
4928 {
4929 /* Set nonzero when we find something of interest. */
4930 rtx x = 0;
4931
4932 #ifdef HAVE_cc0
4933 /* If comparison with cc0, import actual comparison from compare
4934 insn. */
4935 if (op0 == cc0_rtx)
4936 {
4937 if ((prev = prev_nonnote_insn (prev)) == 0
4938 || !NONJUMP_INSN_P (prev)
4939 || (set = single_set (prev)) == 0
4940 || SET_DEST (set) != cc0_rtx)
4941 return 0;
4942
4943 op0 = SET_SRC (set);
4944 op1 = CONST0_RTX (GET_MODE (op0));
4945 if (earliest)
4946 *earliest = prev;
4947 }
4948 #endif
4949
4950 /* If this is a COMPARE, pick up the two things being compared. */
4951 if (GET_CODE (op0) == COMPARE)
4952 {
4953 op1 = XEXP (op0, 1);
4954 op0 = XEXP (op0, 0);
4955 continue;
4956 }
4957 else if (!REG_P (op0))
4958 break;
4959
4960 /* Go back to the previous insn. Stop if it is not an INSN. We also
4961 stop if it isn't a single set or if it has a REG_INC note because
4962 we don't want to bother dealing with it. */
4963
4964 prev = prev_nonnote_nondebug_insn (prev);
4965
4966 if (prev == 0
4967 || !NONJUMP_INSN_P (prev)
4968 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4969 /* In cfglayout mode, there do not have to be labels at the
4970 beginning of a block, or jumps at the end, so the previous
4971 conditions would not stop us when we reach bb boundary. */
4972 || BLOCK_FOR_INSN (prev) != bb)
4973 break;
4974
4975 set = set_of (op0, prev);
4976
4977 if (set
4978 && (GET_CODE (set) != SET
4979 || !rtx_equal_p (SET_DEST (set), op0)))
4980 break;
4981
4982 /* If this is setting OP0, get what it sets it to if it looks
4983 relevant. */
4984 if (set)
4985 {
4986 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4987 #ifdef FLOAT_STORE_FLAG_VALUE
4988 REAL_VALUE_TYPE fsfv;
4989 #endif
4990
4991 /* ??? We may not combine comparisons done in a CCmode with
4992 comparisons not done in a CCmode. This is to aid targets
4993 like Alpha that have an IEEE compliant EQ instruction, and
4994 a non-IEEE compliant BEQ instruction. The use of CCmode is
4995 actually artificial, simply to prevent the combination, but
4996 should not affect other platforms.
4997
4998 However, we must allow VOIDmode comparisons to match either
4999 CCmode or non-CCmode comparison, because some ports have
5000 modeless comparisons inside branch patterns.
5001
5002 ??? This mode check should perhaps look more like the mode check
5003 in simplify_comparison in combine. */
5004
5005 if ((GET_CODE (SET_SRC (set)) == COMPARE
5006 || (((code == NE
5007 || (code == LT
5008 && val_signbit_known_set_p (inner_mode,
5009 STORE_FLAG_VALUE))
5010 #ifdef FLOAT_STORE_FLAG_VALUE
5011 || (code == LT
5012 && SCALAR_FLOAT_MODE_P (inner_mode)
5013 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5014 REAL_VALUE_NEGATIVE (fsfv)))
5015 #endif
5016 ))
5017 && COMPARISON_P (SET_SRC (set))))
5018 && (((GET_MODE_CLASS (mode) == MODE_CC)
5019 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5020 || mode == VOIDmode || inner_mode == VOIDmode))
5021 x = SET_SRC (set);
5022 else if (((code == EQ
5023 || (code == GE
5024 && val_signbit_known_set_p (inner_mode,
5025 STORE_FLAG_VALUE))
5026 #ifdef FLOAT_STORE_FLAG_VALUE
5027 || (code == GE
5028 && SCALAR_FLOAT_MODE_P (inner_mode)
5029 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5030 REAL_VALUE_NEGATIVE (fsfv)))
5031 #endif
5032 ))
5033 && COMPARISON_P (SET_SRC (set))
5034 && (((GET_MODE_CLASS (mode) == MODE_CC)
5035 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5036 || mode == VOIDmode || inner_mode == VOIDmode))
5037
5038 {
5039 reverse_code = 1;
5040 x = SET_SRC (set);
5041 }
5042 else
5043 break;
5044 }
5045
5046 else if (reg_set_p (op0, prev))
5047 /* If this sets OP0, but not directly, we have to give up. */
5048 break;
5049
5050 if (x)
5051 {
5052 /* If the caller is expecting the condition to be valid at INSN,
5053 make sure X doesn't change before INSN. */
5054 if (valid_at_insn_p)
5055 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5056 break;
5057 if (COMPARISON_P (x))
5058 code = GET_CODE (x);
5059 if (reverse_code)
5060 {
5061 code = reversed_comparison_code (x, prev);
5062 if (code == UNKNOWN)
5063 return 0;
5064 reverse_code = 0;
5065 }
5066
5067 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5068 if (earliest)
5069 *earliest = prev;
5070 }
5071 }
5072
5073 /* If constant is first, put it last. */
5074 if (CONSTANT_P (op0))
5075 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5076
5077 /* If OP0 is the result of a comparison, we weren't able to find what
5078 was really being compared, so fail. */
5079 if (!allow_cc_mode
5080 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5081 return 0;
5082
5083 /* Canonicalize any ordered comparison with integers involving equality
5084 if we can do computations in the relevant mode and we do not
5085 overflow. */
5086
5087 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5088 && CONST_INT_P (op1)
5089 && GET_MODE (op0) != VOIDmode
5090 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5091 {
5092 HOST_WIDE_INT const_val = INTVAL (op1);
5093 unsigned HOST_WIDE_INT uconst_val = const_val;
5094 unsigned HOST_WIDE_INT max_val
5095 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5096
5097 switch (code)
5098 {
5099 case LE:
5100 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5101 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5102 break;
5103
5104 /* When cross-compiling, const_val might be sign-extended from
5105 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5106 case GE:
5107 if ((const_val & max_val)
5108 != ((unsigned HOST_WIDE_INT) 1
5109 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5110 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5111 break;
5112
5113 case LEU:
5114 if (uconst_val < max_val)
5115 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5116 break;
5117
5118 case GEU:
5119 if (uconst_val != 0)
5120 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5121 break;
5122
5123 default:
5124 break;
5125 }
5126 }
5127
5128 /* Never return CC0; return zero instead. */
5129 if (CC0_P (op0))
5130 return 0;
5131
5132 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5133 }
5134
5135 /* Given a jump insn JUMP, return the condition that will cause it to branch
5136 to its JUMP_LABEL. If the condition cannot be understood, or is an
5137 inequality floating-point comparison which needs to be reversed, 0 will
5138 be returned.
5139
5140 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5141 insn used in locating the condition was found. If a replacement test
5142 of the condition is desired, it should be placed in front of that
5143 insn and we will be sure that the inputs are still valid. If EARLIEST
5144 is null, the returned condition will be valid at INSN.
5145
5146 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5147 compare CC mode register.
5148
5149 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5150
5151 rtx
5152 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5153 {
5154 rtx cond;
5155 int reverse;
5156 rtx set;
5157
5158 /* If this is not a standard conditional jump, we can't parse it. */
5159 if (!JUMP_P (jump)
5160 || ! any_condjump_p (jump))
5161 return 0;
5162 set = pc_set (jump);
5163
5164 cond = XEXP (SET_SRC (set), 0);
5165
5166 /* If this branches to JUMP_LABEL when the condition is false, reverse
5167 the condition. */
5168 reverse
5169 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5170 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5171
5172 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5173 allow_cc_mode, valid_at_insn_p);
5174 }
5175
5176 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5177 TARGET_MODE_REP_EXTENDED.
5178
5179 Note that we assume that the property of
5180 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5181 narrower than mode B. I.e., if A is a mode narrower than B then in
5182 order to be able to operate on it in mode B, mode A needs to
5183 satisfy the requirements set by the representation of mode B. */
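/* For instance, on a 64-bit target where
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND,
   num_sign_bit_copies_in_rep[DImode][SImode] ends up as 32: every
   bit of a DImode value outside SImode must copy the sign bit.  */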
5184
5185 static void
5186 init_num_sign_bit_copies_in_rep (void)
5187 {
5188 enum machine_mode mode, in_mode;
5189
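/* Note that the step expression of the outer loop below is
   intentional: when the inner loop exits, MODE == IN_MODE, so
   GET_MODE_WIDER_MODE (mode) advances IN_MODE to the next wider
   integral mode.  */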
5190 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5191 in_mode = GET_MODE_WIDER_MODE (mode))
5192 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5193 mode = GET_MODE_WIDER_MODE (mode))
5194 {
5195 enum machine_mode i;
5196
5197 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5198 extends to the next widest mode. */
5199 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5200 || GET_MODE_WIDER_MODE (mode) == in_mode);
5201
5202 /* We are in in_mode. Count how many bits outside of mode
5203 have to be copies of the sign-bit. */
5204 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5205 {
5206 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5207
5208 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5209 /* We can only check sign-bit copies starting from the
5210 top-bit. In order to be able to check the bits we
5211 have already seen we pretend that subsequent bits
5212 have to be sign-bit copies too. */
5213 || num_sign_bit_copies_in_rep [in_mode][mode])
5214 num_sign_bit_copies_in_rep [in_mode][mode]
5215 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5216 }
5217 }
5218 }
5219
5220 /* Suppose that truncation from the machine mode of X to MODE is not a
5221 no-op. See if there is anything special about X so that we can
5222 assume it already contains a truncated value of MODE. */
5223
5224 bool
5225 truncated_to_mode (enum machine_mode mode, const_rtx x)
5226 {
5227 /* This register has already been used in MODE without explicit
5228 truncation. */
5229 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5230 return true;
5231
5232 /* See if we already satisfy the requirements of MODE. If yes we
5233 can just switch to MODE. */
5234 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5235 && (num_sign_bit_copies (x, GET_MODE (x))
5236 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5237 return true;
5238
5239 return false;
5240 }
5241 \f
5242 /* Initialize non_rtx_starting_operands, which is used to speed up
5243 for_each_rtx. */
5244 void
5245 init_rtlanal (void)
5246 {
5247 int i;
5248 for (i = 0; i < NUM_RTX_CODE; i++)
5249 {
5250 const char *format = GET_RTX_FORMAT (i);
5251 const char *first = strpbrk (format, "eEV");
5252 non_rtx_starting_operands[i] = first ? first - format : -1;
5253 }
5254
5255 init_num_sign_bit_copies_in_rep ();
5256 }
5257 \f
5258 /* Check whether this is a constant pool constant. */
5259 bool
5260 constant_pool_constant_p (rtx x)
5261 {
5262 x = avoid_constant_pool_reference (x);
5263 return CONST_DOUBLE_P (x);
5264 }
5265 \f
5266 /* If M is a bitmask that selects a field of low-order bits within an item but
5267 not the entire word, return the length of the field. Return -1 otherwise.
5268 M is used in machine mode MODE. */
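/* For example, 0x0f gives 4 (exact_log2 (0x10)), while 0x0c
   selects a field that does not start at bit 0 and gives -1.  */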
5269
5270 int
5271 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5272 {
5273 if (mode != VOIDmode)
5274 {
5275 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5276 return -1;
5277 m &= GET_MODE_MASK (mode);
5278 }
5279
5280 return exact_log2 (m + 1);
5281 }
5282
5283 /* Return the mode of MEM's address. */
5284
5285 enum machine_mode
5286 get_address_mode (rtx mem)
5287 {
5288 enum machine_mode mode;
5289
5290 gcc_assert (MEM_P (mem));
5291 mode = GET_MODE (XEXP (mem, 0));
5292 if (mode != VOIDmode)
5293 return mode;
5294 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5295 }
5296 \f
5297 /* Split up a CONST_DOUBLE or integer constant rtx
5298 into two rtx's for single words,
5299 storing in *FIRST the word that comes first in memory in the target
5300 and in *SECOND the other. */
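/* For instance, with 32-bit words and a 64-bit HOST_WIDE_INT, the
   CONST_INT 0x100000002 splits into (const_int 1) and (const_int 2),
   with (const_int 1) stored in *FIRST when WORDS_BIG_ENDIAN.  */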
5301
5302 void
5303 split_double (rtx value, rtx *first, rtx *second)
5304 {
5305 if (CONST_INT_P (value))
5306 {
5307 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5308 {
5309 /* In this case the CONST_INT holds both target words.
5310 Extract the bits from it into two word-sized pieces.
5311 Sign extend each half to HOST_WIDE_INT. */
5312 unsigned HOST_WIDE_INT low, high;
5313 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5314 unsigned bits_per_word = BITS_PER_WORD;
5315
5316 /* Set sign_bit to the most significant bit of a word. */
5317 sign_bit = 1;
5318 sign_bit <<= bits_per_word - 1;
5319
5320 /* Set mask so that all bits of the word are set. We could
5321 have used 1 << BITS_PER_WORD instead of basing the
5322 calculation on sign_bit. However, on machines where
5323 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5324 compiler warning, even though the code would never be
5325 executed. */
5326 mask = sign_bit << 1;
5327 mask--;
5328
5329 /* Set sign_extend as any remaining bits. */
5330 sign_extend = ~mask;
5331
5332 /* Pick the lower word and sign-extend it. */
5333 low = INTVAL (value);
5334 low &= mask;
5335 if (low & sign_bit)
5336 low |= sign_extend;
5337
5338 /* Pick the higher word, shifted to the least significant
5339 bits, and sign-extend it. */
5340 high = INTVAL (value);
5341 high >>= bits_per_word - 1;
5342 high >>= 1;
5343 high &= mask;
5344 if (high & sign_bit)
5345 high |= sign_extend;
5346
5347 /* Store the words in the target machine order. */
5348 if (WORDS_BIG_ENDIAN)
5349 {
5350 *first = GEN_INT (high);
5351 *second = GEN_INT (low);
5352 }
5353 else
5354 {
5355 *first = GEN_INT (low);
5356 *second = GEN_INT (high);
5357 }
5358 }
5359 else
5360 {
5361 /* The rule for using CONST_INT for a wider mode
5362 is that we regard the value as signed.
5363 So sign-extend it. */
5364 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5365 if (WORDS_BIG_ENDIAN)
5366 {
5367 *first = high;
5368 *second = value;
5369 }
5370 else
5371 {
5372 *first = value;
5373 *second = high;
5374 }
5375 }
5376 }
5377 else if (!CONST_DOUBLE_P (value))
5378 {
5379 if (WORDS_BIG_ENDIAN)
5380 {
5381 *first = const0_rtx;
5382 *second = value;
5383 }
5384 else
5385 {
5386 *first = value;
5387 *second = const0_rtx;
5388 }
5389 }
5390 else if (GET_MODE (value) == VOIDmode
5391 /* This is the old way we did CONST_DOUBLE integers. */
5392 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5393 {
5394 /* In an integer, the words are defined as most and least significant.
5395 So order them by the target's convention. */
5396 if (WORDS_BIG_ENDIAN)
5397 {
5398 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5399 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5400 }
5401 else
5402 {
5403 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5404 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5405 }
5406 }
5407 else
5408 {
5409 REAL_VALUE_TYPE r;
5410 long l[2];
5411 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5412
5413 /* Note, this converts the REAL_VALUE_TYPE to the target's
5414 format, splits up the floating point double and outputs
5415 exactly 32 bits of it into each of l[0] and l[1] --
5416 not necessarily BITS_PER_WORD bits. */
5417 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5418
5419 /* If 32 bits is an entire word for the target, but not for the host,
5420 then sign-extend on the host so that the number will look the same
5421 way on the host that it would on the target. See for instance
5422 simplify_unary_operation. The #if is needed to avoid compiler
5423 warnings. */
5424
5425 #if HOST_BITS_PER_LONG > 32
5426 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5427 {
5428 if (l[0] & ((long) 1 << 31))
5429 l[0] |= ((long) (-1) << 32);
5430 if (l[1] & ((long) 1 << 31))
5431 l[1] |= ((long) (-1) << 32);
5432 }
5433 #endif
5434
5435 *first = GEN_INT (l[0]);
5436 *second = GEN_INT (l[1]);
5437 }
5438 }
5439
5440 /* Strip outer address "mutations" from LOC and return a pointer to the
5441 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5442 stripped expression there.
5443
5444 "Mutations" either convert between modes or apply some kind of
5445 alignment. */
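/* For example, given
   (and:SI (plus:SI (reg:SI 60) (reg:SI 61)) (const_int -16))
   this returns a pointer to the PLUS expression and, if OUTER_CODE
   is nonnull, stores AND in it.  (The register numbers are
   illustrative.)  */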
5446
5447 rtx *
5448 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5449 {
5450 for (;;)
5451 {
5452 enum rtx_code code = GET_CODE (*loc);
5453 if (GET_RTX_CLASS (code) == RTX_UNARY)
5454 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5455 used to convert between pointer sizes. */
5456 loc = &XEXP (*loc, 0);
5457 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5458 /* (and ... (const_int -X)) is used to align to X bytes. */
5459 loc = &XEXP (*loc, 0);
5460 else if (code == SUBREG
5461 && !OBJECT_P (SUBREG_REG (*loc))
5462 && subreg_lowpart_p (*loc))
5463 /* (subreg (operator ...) ...) inside an AND is used for mode
5464 conversion too. */
5465 loc = &SUBREG_REG (*loc);
5466 else
5467 return loc;
5468 if (outer_code)
5469 *outer_code = code;
5470 }
5471 }
5472
5473 /* Return true if X must be a base rather than an index. */
5474
5475 static bool
5476 must_be_base_p (rtx x)
5477 {
5478 return GET_CODE (x) == LO_SUM;
5479 }
5480
5481 /* Return true if X must be an index rather than a base. */
5482
5483 static bool
5484 must_be_index_p (rtx x)
5485 {
5486 return GET_CODE (x) == MULT || GET_CODE (x) == ASHIFT;
5487 }
5488
5489 /* Set the segment part of address INFO to LOC, given that INNER is the
5490 unmutated value. */
5491
5492 static void
5493 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5494 {
5495 gcc_checking_assert (GET_CODE (*inner) == UNSPEC);
5496
5497 gcc_assert (!info->segment);
5498 info->segment = loc;
5499 info->segment_term = inner;
5500 }
5501
5502 /* Set the base part of address INFO to LOC, given that INNER is the
5503 unmutated value. */
5504
5505 static void
5506 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5507 {
5508 if (GET_CODE (*inner) == LO_SUM)
5509 inner = strip_address_mutations (&XEXP (*inner, 0));
5510 gcc_checking_assert (REG_P (*inner)
5511 || MEM_P (*inner)
5512 || GET_CODE (*inner) == SUBREG);
5513
5514 gcc_assert (!info->base);
5515 info->base = loc;
5516 info->base_term = inner;
5517 }
5518
5519 /* Set the index part of address INFO to LOC, given that INNER is the
5520 unmutated value. */
5521
5522 static void
5523 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5524 {
5525 if ((GET_CODE (*inner) == MULT || GET_CODE (*inner) == ASHIFT)
5526 && CONSTANT_P (XEXP (*inner, 1)))
5527 inner = strip_address_mutations (&XEXP (*inner, 0));
5528 gcc_checking_assert (REG_P (*inner)
5529 || MEM_P (*inner)
5530 || GET_CODE (*inner) == SUBREG);
5531
5532 gcc_assert (!info->index);
5533 info->index = loc;
5534 info->index_term = inner;
5535 }
5536
5537 /* Set the displacement part of address INFO to LOC, given that INNER
5538 is the constant term. */
5539
5540 static void
5541 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5542 {
5543 gcc_checking_assert (CONSTANT_P (*inner));
5544
5545 gcc_assert (!info->disp);
5546 info->disp = loc;
5547 info->disp_term = inner;
5548 }
5549
5550 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5551 rest of INFO accordingly. */
5552
5553 static void
5554 decompose_incdec_address (struct address_info *info)
5555 {
5556 info->autoinc_p = true;
5557
5558 rtx *base = &XEXP (*info->inner, 0);
5559 set_address_base (info, base, base);
5560 gcc_checking_assert (info->base == info->base_term);
5561
5562 /* These addresses are only valid when the size of the addressed
5563 value is known. */
5564 gcc_checking_assert (info->mode != VOIDmode);
5565 }
5566
5567 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5568 of INFO accordingly. */
5569
5570 static void
5571 decompose_automod_address (struct address_info *info)
5572 {
5573 info->autoinc_p = true;
5574
5575 rtx *base = &XEXP (*info->inner, 0);
5576 set_address_base (info, base, base);
5577 gcc_checking_assert (info->base == info->base_term);
5578
5579 rtx plus = XEXP (*info->inner, 1);
5580 gcc_assert (GET_CODE (plus) == PLUS);
5581
5582 info->base_term2 = &XEXP (plus, 0);
5583 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5584
5585 rtx *step = &XEXP (plus, 1);
5586 rtx *inner_step = strip_address_mutations (step);
5587 if (CONSTANT_P (*inner_step))
5588 set_address_disp (info, step, inner_step);
5589 else
5590 set_address_index (info, step, inner_step);
5591 }
5592
5593 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5594 values in [PTR, END). Return a pointer to the end of the used array. */
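/* E.g. for (plus (plus (reg A) (reg B)) (const_int 4)) this records
   pointers to (reg A), (reg B) and (const_int 4), in that order.  */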
5595
5596 static rtx **
5597 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5598 {
5599 rtx x = *loc;
5600 if (GET_CODE (x) == PLUS)
5601 {
5602 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5603 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5604 }
5605 else
5606 {
5607 gcc_assert (ptr != end);
5608 *ptr++ = loc;
5609 }
5610 return ptr;
5611 }
5612
5613 /* Evaluate the likelihood of X being a base or index value, returning
5614 positive if it is likely to be a base, negative if it is likely to be
5615 an index, and 0 if we can't tell. Make the magnitude of the return
5616 value reflect the amount of confidence we have in the answer.
5617
5618 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
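/* For example, a MULT must be an index (-3), a LO_SUM must be a
   base (3), and a REG with REG_POINTER set is probably a base (2).  */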
5619
5620 static int
5621 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5622 enum rtx_code outer_code, enum rtx_code index_code)
5623 {
5624 /* See whether we can be certain. */
5625 if (must_be_base_p (x))
5626 return 3;
5627 if (must_be_index_p (x))
5628 return -3;
5629
5630 /* Believe *_POINTER unless the address shape requires otherwise. */
5631 if (REG_P (x) && REG_POINTER (x))
5632 return 2;
5633 if (MEM_P (x) && MEM_POINTER (x))
5634 return 2;
5635
5636 if (REG_P (x) && HARD_REGISTER_P (x))
5637 {
5638 /* X is a hard register. If it only fits one of the base
5639 or index classes, choose that interpretation. */
5640 int regno = REGNO (x);
5641 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5642 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5643 if (base_p != index_p)
5644 return base_p ? 1 : -1;
5645 }
5646 return 0;
5647 }
5648
5649 /* INFO->INNER describes a normal, non-automodified address.
5650 Fill in the rest of INFO accordingly. */
5651
5652 static void
5653 decompose_normal_address (struct address_info *info)
5654 {
5655 /* Treat the address as the sum of up to four values. */
5656 rtx *ops[4];
5657 size_t n_ops = extract_plus_operands (info->inner, ops,
5658 ops + ARRAY_SIZE (ops)) - ops;
5659
5660 /* If there is more than one component, any base component is in a PLUS. */
5661 if (n_ops > 1)
5662 info->base_outer_code = PLUS;
5663
5664 /* Separate the parts that contain a REG or MEM from those that don't.
5665 Record the latter in INFO and leave the former in OPS. */
5666 rtx *inner_ops[4];
5667 size_t out = 0;
5668 for (size_t in = 0; in < n_ops; ++in)
5669 {
5670 rtx *loc = ops[in];
5671 rtx *inner = strip_address_mutations (loc);
5672 if (CONSTANT_P (*inner))
5673 set_address_disp (info, loc, inner);
5674 else if (GET_CODE (*inner) == UNSPEC)
5675 set_address_segment (info, loc, inner);
5676 else
5677 {
5678 ops[out] = loc;
5679 inner_ops[out] = inner;
5680 ++out;
5681 }
5682 }
5683
5684 /* Classify the remaining OPS members as bases and indexes. */
5685 if (out == 1)
5686 {
5687 /* Assume that the remaining value is a base unless the shape
5688 requires otherwise. */
5689 if (!must_be_index_p (*inner_ops[0]))
5690 set_address_base (info, ops[0], inner_ops[0]);
5691 else
5692 set_address_index (info, ops[0], inner_ops[0]);
5693 }
5694 else if (out == 2)
5695 {
5696 /* In the event of a tie, assume the base comes first. */
5697 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5698 GET_CODE (*ops[1]))
5699 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5700 GET_CODE (*ops[0])))
5701 {
5702 set_address_base (info, ops[0], inner_ops[0]);
5703 set_address_index (info, ops[1], inner_ops[1]);
5704 }
5705 else
5706 {
5707 set_address_base (info, ops[1], inner_ops[1]);
5708 set_address_index (info, ops[0], inner_ops[0]);
5709 }
5710 }
5711 else
5712 gcc_assert (out == 0);
5713 }
5714
5715 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5716 or VOIDmode if not known. AS is the address space associated with LOC.
5717 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
5718
5719 void
5720 decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
5721 addr_space_t as, enum rtx_code outer_code)
5722 {
5723 memset (info, 0, sizeof (*info));
5724 info->mode = mode;
5725 info->as = as;
5726 info->addr_outer_code = outer_code;
5727 info->outer = loc;
5728 info->inner = strip_address_mutations (loc, &outer_code);
5729 info->base_outer_code = outer_code;
5730 switch (GET_CODE (*info->inner))
5731 {
5732 case PRE_DEC:
5733 case PRE_INC:
5734 case POST_DEC:
5735 case POST_INC:
5736 decompose_incdec_address (info);
5737 break;
5738
5739 case PRE_MODIFY:
5740 case POST_MODIFY:
5741 decompose_automod_address (info);
5742 break;
5743
5744 default:
5745 decompose_normal_address (info);
5746 break;
5747 }
5748 }
5749
5750 /* Describe address operand LOC in INFO. */
5751
5752 void
5753 decompose_lea_address (struct address_info *info, rtx *loc)
5754 {
5755 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
5756 }
5757
5758 /* Describe the address of MEM X in INFO. */
5759
5760 void
5761 decompose_mem_address (struct address_info *info, rtx x)
5762 {
5763 gcc_assert (MEM_P (x));
5764 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
5765 MEM_ADDR_SPACE (x), MEM);
5766 }
5767
5768 /* Update INFO after a change to the address it describes. */
5769
5770 void
5771 update_address (struct address_info *info)
5772 {
5773 decompose_address (info, info->outer, info->mode, info->as,
5774 info->addr_outer_code);
5775 }
5776
5777 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
5778 more complicated than that. */
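/* E.g. an index of (mult:SI (reg:SI 60) (const_int 4)) or
   (ashift:SI (reg:SI 60) (const_int 2)) yields a scale of 4; a bare
   register index yields 1.  */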
5779
5780 HOST_WIDE_INT
5781 get_index_scale (const struct address_info *info)
5782 {
5783 rtx index = *info->index;
5784 if (GET_CODE (index) == MULT
5785 && CONST_INT_P (XEXP (index, 1))
5786 && info->index_term == &XEXP (index, 0))
5787 return INTVAL (XEXP (index, 1));
5788
5789 if (GET_CODE (index) == ASHIFT
5790 && CONST_INT_P (XEXP (index, 1))
5791 && info->index_term == &XEXP (index, 0))
5792 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
5793
5794 if (info->index == info->index_term)
5795 return 1;
5796
5797 return 0;
5798 }
5799
5800 /* Return the "index code" of INFO, in the form required by
5801 ok_for_base_p_1. */
5802
5803 enum rtx_code
5804 get_index_code (const struct address_info *info)
5805 {
5806 if (info->index)
5807 return GET_CODE (*info->index);
5808
5809 if (info->disp)
5810 return GET_CODE (*info->disp);
5811
5812 return SCRATCH;
5813 }