gcc/rtlanal.c
/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "tree.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
\f
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */
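/* For instance, a weak SYMBOL_REF (which may remain unresolved and
   therefore null) can trap, while the stack pointer, the frame pointer
   and the virtual frame registers are assumed safe; see the cases
   below.  */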

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
                         ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
\f
/* Return the CALL in X if there is one.  */
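/* For instance, this handles a bare (call ...) pattern, a call_value-style
   (set (reg) (call ...)), and either of those wrapped in a PARALLEL; the
   CALL's first operand must be a MEM.  */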

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
\f
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */
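/* For instance, (const (plus (symbol_ref "x") (const_int 4))) yields 4
   and (const (minus (symbol_ref "x") (const_int 4))) yields -4.  */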

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */
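/* E.g., for (const (plus (symbol_ref "x") (const_int 4))) this
   returns (symbol_ref "x").  */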

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
\f
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */
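/* For instance, (const (plus (symbol_ref "x") (const_int 8))) splits
   into base (symbol_ref "x") and offset (const_int 8); anything without
   that shape splits into itself and (const_int 0).  */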

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
\f
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

\f
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_UNSIGNED_P (op))
    return true;

  return false;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */
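/* E.g., reg_mentioned_p ((reg:SI 1), (plus:SI (reg:SI 1) (const_int 4)))
   is nonzero; REGs are compared by register number below.  */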

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }
  return 0;
}
\f
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;
      break;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;
      break;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
\f
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    IOR_HARD_REG_SET (*pset, call_used_reg_set);
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *)data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */
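/* For instance, (parallel [(set (reg 0) (reg 1)) (clobber (reg 2))])
   counts as a single set: the CLOBBER is skipped and the SET is
   returned.  */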

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */
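/* E.g., (set (reg:SI 1) (reg:SI 1)) is a no-op, as is a copy between
   two equal MEMs that have no side effects.  */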

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
\f
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
\f

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
\f
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note are always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */
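/* E.g., reg_overlap_mentioned_p ((reg:SI 1), (mem:SI (reg:SI 1))) is
   nonzero, because the address of the MEM uses register 1; for hard
   registers, overlaps of multi-register values are also detected.  */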

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */
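/* record_hard_reg_sets above is a typical FUN, and find_all_hard_reg_sets
   shows a typical call:
     note_stores (PATTERN (insn), record_hard_reg_sets, pset);  */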

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
\f
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
\f
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}

\f
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}

/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}

/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
\f
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2085
2086 int
2087 volatile_insn_p (const_rtx x)
2088 {
2089 const RTX_CODE code = GET_CODE (x);
2090 switch (code)
2091 {
2092 case LABEL_REF:
2093 case SYMBOL_REF:
2094 case CONST:
2095 CASE_CONST_ANY:
2096 case CC0:
2097 case PC:
2098 case REG:
2099 case SCRATCH:
2100 case CLOBBER:
2101 case ADDR_VEC:
2102 case ADDR_DIFF_VEC:
2103 case CALL:
2104 case MEM:
2105 return 0;
2106
2107 case UNSPEC_VOLATILE:
2108 return 1;
2109
2110 case ASM_INPUT:
2111 case ASM_OPERANDS:
2112 if (MEM_VOLATILE_P (x))
2113 return 1;
2114
2115 default:
2116 break;
2117 }
2118
2119 /* Recursively scan the operands of this expression. */
2120
2121 {
2122 const char *const fmt = GET_RTX_FORMAT (code);
2123 int i;
2124
2125 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2126 {
2127 if (fmt[i] == 'e')
2128 {
2129 if (volatile_insn_p (XEXP (x, i)))
2130 return 1;
2131 }
2132 else if (fmt[i] == 'E')
2133 {
2134 int j;
2135 for (j = 0; j < XVECLEN (x, i); j++)
2136 if (volatile_insn_p (XVECEXP (x, i, j)))
2137 return 1;
2138 }
2139 }
2140 }
2141 return 0;
2142 }
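
/* For illustration: a GNU C statement such as

     asm volatile ("" : : : "memory");

   expands to a pattern containing an ASM_OPERANDS whose volatile flag
   is set, which the MEM_VOLATILE_P test above detects, so
   volatile_insn_p returns 1 for it.  */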
2143
2144 /* Nonzero if X contains any volatile memory references,
2145 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2146
2147 int
2148 volatile_refs_p (const_rtx x)
2149 {
2150 const RTX_CODE code = GET_CODE (x);
2151 switch (code)
2152 {
2153 case LABEL_REF:
2154 case SYMBOL_REF:
2155 case CONST:
2156 CASE_CONST_ANY:
2157 case CC0:
2158 case PC:
2159 case REG:
2160 case SCRATCH:
2161 case CLOBBER:
2162 case ADDR_VEC:
2163 case ADDR_DIFF_VEC:
2164 return 0;
2165
2166 case UNSPEC_VOLATILE:
2167 return 1;
2168
2169 case MEM:
2170 case ASM_INPUT:
2171 case ASM_OPERANDS:
2172 if (MEM_VOLATILE_P (x))
2173 return 1;
2174
2175 default:
2176 break;
2177 }
2178
2179 /* Recursively scan the operands of this expression. */
2180
2181 {
2182 const char *const fmt = GET_RTX_FORMAT (code);
2183 int i;
2184
2185 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2186 {
2187 if (fmt[i] == 'e')
2188 {
2189 if (volatile_refs_p (XEXP (x, i)))
2190 return 1;
2191 }
2192 else if (fmt[i] == 'E')
2193 {
2194 int j;
2195 for (j = 0; j < XVECLEN (x, i); j++)
2196 if (volatile_refs_p (XVECEXP (x, i, j)))
2197 return 1;
2198 }
2199 }
2200 }
2201 return 0;
2202 }
2203
2204 /* Similar to above, except that it also rejects register pre- and post-
2205 incrementing. */
2206
2207 int
2208 side_effects_p (const_rtx x)
2209 {
2210 const RTX_CODE code = GET_CODE (x);
2211 switch (code)
2212 {
2213 case LABEL_REF:
2214 case SYMBOL_REF:
2215 case CONST:
2216 CASE_CONST_ANY:
2217 case CC0:
2218 case PC:
2219 case REG:
2220 case SCRATCH:
2221 case ADDR_VEC:
2222 case ADDR_DIFF_VEC:
2223 case VAR_LOCATION:
2224 return 0;
2225
2226 case CLOBBER:
2227 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2228 when some combination can't be done. If we see one, don't think
2229 that we can simplify the expression. */
2230 return (GET_MODE (x) != VOIDmode);
2231
2232 case PRE_INC:
2233 case PRE_DEC:
2234 case POST_INC:
2235 case POST_DEC:
2236 case PRE_MODIFY:
2237 case POST_MODIFY:
2238 case CALL:
2239 case UNSPEC_VOLATILE:
2240 return 1;
2241
2242 case MEM:
2243 case ASM_INPUT:
2244 case ASM_OPERANDS:
2245 if (MEM_VOLATILE_P (x))
2246 return 1;
2247
2248 default:
2249 break;
2250 }
2251
2252 /* Recursively scan the operands of this expression. */
2253
2254 {
2255 const char *fmt = GET_RTX_FORMAT (code);
2256 int i;
2257
2258 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2259 {
2260 if (fmt[i] == 'e')
2261 {
2262 if (side_effects_p (XEXP (x, i)))
2263 return 1;
2264 }
2265 else if (fmt[i] == 'E')
2266 {
2267 int j;
2268 for (j = 0; j < XVECLEN (x, i); j++)
2269 if (side_effects_p (XVECEXP (x, i, j)))
2270 return 1;
2271 }
2272 }
2273 }
2274 return 0;
2275 }
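
/* Taken together, the three predicates above form a hierarchy: anything
   satisfying volatile_insn_p also satisfies volatile_refs_p, and
   anything satisfying volatile_refs_p also satisfies side_effects_p.
   For example, a volatile MEM is volatile_refs_p but not
   volatile_insn_p, and (post_inc:SI (reg:SI r)) is side_effects_p but
   not volatile_refs_p.  */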
2276 \f
2277 /* Return nonzero if evaluating rtx X might cause a trap.
2278 FLAGS controls how to consider MEMs.  A nonzero value means the context
2279 of the access may have changed from the original, such that the
2280 address may have become invalid. */
2281
2282 int
2283 may_trap_p_1 (const_rtx x, unsigned flags)
2284 {
2285 int i;
2286 enum rtx_code code;
2287 const char *fmt;
2288
2289 /* We make no distinction currently, but this function is part of
2290 the internal target-hooks ABI so we keep the parameter as
2291 "unsigned flags". */
2292 bool code_changed = flags != 0;
2293
2294 if (x == 0)
2295 return 0;
2296 code = GET_CODE (x);
2297 switch (code)
2298 {
2299 /* Handle these cases quickly. */
2300 CASE_CONST_ANY:
2301 case SYMBOL_REF:
2302 case LABEL_REF:
2303 case CONST:
2304 case PC:
2305 case CC0:
2306 case REG:
2307 case SCRATCH:
2308 return 0;
2309
2310 case UNSPEC:
2311 return targetm.unspec_may_trap_p (x, flags);
2312
2313 case UNSPEC_VOLATILE:
2314 case ASM_INPUT:
2315 case TRAP_IF:
2316 return 1;
2317
2318 case ASM_OPERANDS:
2319 return MEM_VOLATILE_P (x);
2320
2321 /* Memory ref can trap unless it's a static var or a stack slot. */
2322 case MEM:
2323 /* Recognize specific pattern of stack checking probes. */
2324 if (flag_stack_check
2325 && MEM_VOLATILE_P (x)
2326 && XEXP (x, 0) == stack_pointer_rtx)
2327 return 1;
2328 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2329 reference; moving it out of context, such as when moving code
2330 while optimizing, might cause its address to become invalid. */
2331 code_changed
2332 || !MEM_NOTRAP_P (x))
2333 {
2334 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2335 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2336 GET_MODE (x), code_changed);
2337 }
2338
2339 return 0;
2340
2341 /* Division by a non-constant might trap. */
2342 case DIV:
2343 case MOD:
2344 case UDIV:
2345 case UMOD:
2346 if (HONOR_SNANS (GET_MODE (x)))
2347 return 1;
2348 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2349 return flag_trapping_math;
2350 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2351 return 1;
2352 break;
2353
2354 case EXPR_LIST:
2355 /* An EXPR_LIST is used to represent a function call. This
2356 certainly may trap. */
2357 return 1;
2358
2359 case GE:
2360 case GT:
2361 case LE:
2362 case LT:
2363 case LTGT:
2364 case COMPARE:
2365 /* Some floating point comparisons may trap. */
2366 if (!flag_trapping_math)
2367 break;
2368 /* ??? There is no machine independent way to check for tests that trap
2369 when COMPARE is used, though many targets do make this distinction.
2370 For instance, sparc uses CCFPE for compares which generate exceptions
2371 and CCFP for compares which do not generate exceptions. */
2372 if (HONOR_NANS (GET_MODE (x)))
2373 return 1;
2374 /* But often the compare has some CC mode, so check operand
2375 modes as well. */
2376 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2377 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2378 return 1;
2379 break;
2380
2381 case EQ:
2382 case NE:
2383 if (HONOR_SNANS (GET_MODE (x)))
2384 return 1;
2385 /* Often the comparison is in CC mode, so check the operand modes. */
2386 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2387 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2388 return 1;
2389 break;
2390
2391 case FIX:
2392 /* Conversion of floating point might trap. */
2393 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2394 return 1;
2395 break;
2396
2397 case NEG:
2398 case ABS:
2399 case SUBREG:
2400 /* These operations don't trap even with floating point. */
2401 break;
2402
2403 default:
2404 /* Any floating arithmetic may trap. */
2405 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2406 return 1;
2407 }
2408
2409 fmt = GET_RTX_FORMAT (code);
2410 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2411 {
2412 if (fmt[i] == 'e')
2413 {
2414 if (may_trap_p_1 (XEXP (x, i), flags))
2415 return 1;
2416 }
2417 else if (fmt[i] == 'E')
2418 {
2419 int j;
2420 for (j = 0; j < XVECLEN (x, i); j++)
2421 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2422 return 1;
2423 }
2424 }
2425 return 0;
2426 }
2427
2428 /* Return nonzero if evaluating rtx X might cause a trap. */
2429
2430 int
2431 may_trap_p (const_rtx x)
2432 {
2433 return may_trap_p_1 (x, 0);
2434 }
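
/* For example (illustrative only): with -ftrapping-math,
   (div:SF (reg:SF x) (reg:SF y)) is may_trap_p because floating-point
   division can raise an exception; (udiv:SI (reg:SI x) (reg:SI y)) is
   may_trap_p because the divisor is not a known nonzero constant; and
   (plus:SI (reg:SI x) (reg:SI y)) is not may_trap_p.  */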
2435
2436 /* Same as above, but additionally return nonzero if evaluating rtx X might
2437 cause a fault. We define a fault for the purpose of this function as an
2438 erroneous execution condition that cannot be encountered during the normal
2439 execution of a valid program; the typical example is an unaligned memory
2440 access on a strict alignment machine. The compiler guarantees that it
2441 doesn't generate code that will fault from a valid program, but this
2442 guarantee doesn't mean anything for individual instructions. Consider
2443 the following example:
2444
2445 struct S { int d; union { char *cp; int *ip; }; };
2446
2447 int foo(struct S *s)
2448 {
2449 if (s->d == 1)
2450 return *s->ip;
2451 else
2452 return *s->cp;
2453 }
2454
2455 on a strict alignment machine. In a valid program, foo will never be
2456 invoked on a structure for which d is equal to 1 and the underlying
2457 unique field of the union not aligned on a 4-byte boundary, but the
2458 expression *s->ip might cause a fault if considered individually.
2459
2460 At the RTL level, potentially problematic expressions will almost always
2461 satisfy may_trap_p; for example, the above dereference can be emitted as
2462 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2463 However, suppose that foo is inlined in a caller that causes s->cp to
2464 point to a local character variable and guarantees that s->d is not set
2465 to 1; foo may have been effectively translated into pseudo-RTL as:
2466
2467 if ((reg:SI) == 1)
2468 (set (reg:SI) (mem:SI (%fp - 7)))
2469 else
2470 (set (reg:QI) (mem:QI (%fp - 7)))
2471
2472 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2473 memory reference to a stack slot, but it will certainly cause a fault
2474 on a strict alignment machine. */
2475
2476 int
2477 may_trap_or_fault_p (const_rtx x)
2478 {
2479 return may_trap_p_1 (x, 1);
2480 }
2481 \f
2482 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2483 i.e., an inequality. */
2484
2485 int
2486 inequality_comparisons_p (const_rtx x)
2487 {
2488 const char *fmt;
2489 int len, i;
2490 const enum rtx_code code = GET_CODE (x);
2491
2492 switch (code)
2493 {
2494 case REG:
2495 case SCRATCH:
2496 case PC:
2497 case CC0:
2498 CASE_CONST_ANY:
2499 case CONST:
2500 case LABEL_REF:
2501 case SYMBOL_REF:
2502 return 0;
2503
2504 case LT:
2505 case LTU:
2506 case GT:
2507 case GTU:
2508 case LE:
2509 case LEU:
2510 case GE:
2511 case GEU:
2512 return 1;
2513
2514 default:
2515 break;
2516 }
2517
2518 len = GET_RTX_LENGTH (code);
2519 fmt = GET_RTX_FORMAT (code);
2520
2521 for (i = 0; i < len; i++)
2522 {
2523 if (fmt[i] == 'e')
2524 {
2525 if (inequality_comparisons_p (XEXP (x, i)))
2526 return 1;
2527 }
2528 else if (fmt[i] == 'E')
2529 {
2530 int j;
2531 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2532 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2533 return 1;
2534 }
2535 }
2536
2537 return 0;
2538 }
2539 \f
2540 /* Replace any occurrence of FROM in X with TO. The function does
2541 not recurse into CONST_DOUBLE for the replacement.
2542
2543 Note that copying is not done, so X must not be shared unless all copies
2544 are to be modified. */
2545
2546 rtx
2547 replace_rtx (rtx x, rtx from, rtx to)
2548 {
2549 int i, j;
2550 const char *fmt;
2551
2552 if (x == from)
2553 return to;
2554
2555 /* Allow this function to make replacements in EXPR_LISTs. */
2556 if (x == 0)
2557 return 0;
2558
2559 if (GET_CODE (x) == SUBREG)
2560 {
2561 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2562
2563 if (CONST_INT_P (new_rtx))
2564 {
2565 x = simplify_subreg (GET_MODE (x), new_rtx,
2566 GET_MODE (SUBREG_REG (x)),
2567 SUBREG_BYTE (x));
2568 gcc_assert (x);
2569 }
2570 else
2571 SUBREG_REG (x) = new_rtx;
2572
2573 return x;
2574 }
2575 else if (GET_CODE (x) == ZERO_EXTEND)
2576 {
2577 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2578
2579 if (CONST_INT_P (new_rtx))
2580 {
2581 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2582 new_rtx, GET_MODE (XEXP (x, 0)));
2583 gcc_assert (x);
2584 }
2585 else
2586 XEXP (x, 0) = new_rtx;
2587
2588 return x;
2589 }
2590
2591 fmt = GET_RTX_FORMAT (GET_CODE (x));
2592 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2593 {
2594 if (fmt[i] == 'e')
2595 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2596 else if (fmt[i] == 'E')
2597 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2598 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2599 }
2600
2601 return x;
2602 }
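
/* Usage note (added for exposition): because replace_rtx rewrites
   operands in place and returns a distinct rtx only when the whole of X
   matches FROM, or when a SUBREG or ZERO_EXTEND folds to a constant,
   callers should still write

     x = replace_rtx (x, from, to);

   rather than discard the return value.  */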
2603 \f
2604 /* Replace occurrences of the old label in *X with the new one.
2605 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2606
2607 int
2608 replace_label (rtx *x, void *data)
2609 {
2610 rtx l = *x;
2611 rtx old_label = ((replace_label_data *) data)->r1;
2612 rtx new_label = ((replace_label_data *) data)->r2;
2613 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2614
2615 if (l == NULL_RTX)
2616 return 0;
2617
2618 if (GET_CODE (l) == SYMBOL_REF
2619 && CONSTANT_POOL_ADDRESS_P (l))
2620 {
2621 rtx c = get_pool_constant (l);
2622 if (rtx_referenced_p (old_label, c))
2623 {
2624 rtx new_c, new_l;
2625 replace_label_data *d = (replace_label_data *) data;
2626
2627 /* Create a copy of constant C; replace the label inside
2628 but do not update LABEL_NUSES because uses in constant pool
2629 are not counted. */
2630 new_c = copy_rtx (c);
2631 d->update_label_nuses = false;
2632 for_each_rtx (&new_c, replace_label, data);
2633 d->update_label_nuses = update_label_nuses;
2634
2635 /* Add the new constant NEW_C to constant pool and replace
2636 the old reference to constant by new reference. */
2637 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2638 *x = replace_rtx (l, l, new_l);
2639 }
2640 return 0;
2641 }
2642
2643 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2644 field. This is not handled by for_each_rtx because it doesn't
2645 handle unprinted ('0') fields. */
2646 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2647 JUMP_LABEL (l) = new_label;
2648
2649 if ((GET_CODE (l) == LABEL_REF
2650 || GET_CODE (l) == INSN_LIST)
2651 && XEXP (l, 0) == old_label)
2652 {
2653 XEXP (l, 0) = new_label;
2654 if (update_label_nuses)
2655 {
2656 ++LABEL_NUSES (new_label);
2657 --LABEL_NUSES (old_label);
2658 }
2659 return 0;
2660 }
2661
2662 return 0;
2663 }
2664
2665 /* Return nonzero when *BODY is equal to X or when X is directly
2666 referenced by *BODY, so that FOR_EACH_RTX stops traversing and returns
2667 nonzero too; otherwise FOR_EACH_RTX continues traversing *BODY. */
2668
2669 static int
2670 rtx_referenced_p_1 (rtx *body, void *x)
2671 {
2672 rtx y = (rtx) x;
2673
2674 if (*body == NULL_RTX)
2675 return y == NULL_RTX;
2676
2677 /* Return true if a label_ref *BODY refers to label Y. */
2678 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2679 return XEXP (*body, 0) == y;
2680
2681 /* If *BODY is a reference to pool constant traverse the constant. */
2682 if (GET_CODE (*body) == SYMBOL_REF
2683 && CONSTANT_POOL_ADDRESS_P (*body))
2684 return rtx_referenced_p (y, get_pool_constant (*body));
2685
2686 /* By default, compare the RTL expressions. */
2687 return rtx_equal_p (*body, y);
2688 }
2689
2690 /* Return true if X is referenced in BODY. */
2691
2692 int
2693 rtx_referenced_p (rtx x, rtx body)
2694 {
2695 return for_each_rtx (&body, rtx_referenced_p_1, x);
2696 }
2697
2698 /* If INSN is a tablejump, return true and store the label (before the jump
2699 table) in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL. */
2700
2701 bool
2702 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2703 {
2704 rtx label, table;
2705
2706 if (!JUMP_P (insn))
2707 return false;
2708
2709 label = JUMP_LABEL (insn);
2710 if (label != NULL_RTX && !ANY_RETURN_P (label)
2711 && (table = next_active_insn (label)) != NULL_RTX
2712 && JUMP_TABLE_DATA_P (table))
2713 {
2714 gcc_assert (table == NEXT_INSN (label));
2715 if (labelp)
2716 *labelp = label;
2717 if (tablep)
2718 *tablep = table;
2719 return true;
2720 }
2721 return false;
2722 }
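
/* For illustration, a typical tablejump looks roughly like

     (jump_insn (set (pc) (mem:P (plus:P (reg:P idx) (label_ref L)))))
     L: (code_label)
     (jump_table_data (addr_diff_vec ...))

   so on success *LABELP receives the code_label L and *TABLEP the
   jump_table_data insn that immediately follows it.  The exact shape of
   the dispatch insn is target-dependent.  */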
2723
2724 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2725 constant that is not in the constant pool and not in the condition
2726 of an IF_THEN_ELSE. */
2727
2728 static int
2729 computed_jump_p_1 (const_rtx x)
2730 {
2731 const enum rtx_code code = GET_CODE (x);
2732 int i, j;
2733 const char *fmt;
2734
2735 switch (code)
2736 {
2737 case LABEL_REF:
2738 case PC:
2739 return 0;
2740
2741 case CONST:
2742 CASE_CONST_ANY:
2743 case SYMBOL_REF:
2744 case REG:
2745 return 1;
2746
2747 case MEM:
2748 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2749 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2750
2751 case IF_THEN_ELSE:
2752 return (computed_jump_p_1 (XEXP (x, 1))
2753 || computed_jump_p_1 (XEXP (x, 2)));
2754
2755 default:
2756 break;
2757 }
2758
2759 fmt = GET_RTX_FORMAT (code);
2760 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2761 {
2762 if (fmt[i] == 'e'
2763 && computed_jump_p_1 (XEXP (x, i)))
2764 return 1;
2765
2766 else if (fmt[i] == 'E')
2767 for (j = 0; j < XVECLEN (x, i); j++)
2768 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2769 return 1;
2770 }
2771
2772 return 0;
2773 }
2774
2775 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2776
2777 Tablejumps and casesi insns are not considered indirect jumps;
2778 we can recognize them by a (use (label_ref)). */
2779
2780 int
2781 computed_jump_p (const_rtx insn)
2782 {
2783 int i;
2784 if (JUMP_P (insn))
2785 {
2786 rtx pat = PATTERN (insn);
2787
2788 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2789 if (JUMP_LABEL (insn) != NULL)
2790 return 0;
2791
2792 if (GET_CODE (pat) == PARALLEL)
2793 {
2794 int len = XVECLEN (pat, 0);
2795 int has_use_labelref = 0;
2796
2797 for (i = len - 1; i >= 0; i--)
2798 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2799 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2800 == LABEL_REF))
2801 has_use_labelref = 1;
2802
2803 if (! has_use_labelref)
2804 for (i = len - 1; i >= 0; i--)
2805 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2806 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2807 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2808 return 1;
2809 }
2810 else if (GET_CODE (pat) == SET
2811 && SET_DEST (pat) == pc_rtx
2812 && computed_jump_p_1 (SET_SRC (pat)))
2813 return 1;
2814 }
2815 return 0;
2816 }
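
/* For example, (jump_insn (set (pc) (reg:P r))) with a null JUMP_LABEL
   is a computed jump, whereas a casesi expansion is not: its PARALLEL
   carries a (use (label_ref ...)), which the loop above detects.  */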
2817
2818 /* Optimized inner loop of for_each_rtx, trying to avoid useless recursive
2819 calls.  Processes the subexpressions of EXP from operand N on and passes them to F. */
2820 static int
2821 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2822 {
2823 int result, i, j;
2824 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2825 rtx *x;
2826
2827 for (; format[n] != '\0'; n++)
2828 {
2829 switch (format[n])
2830 {
2831 case 'e':
2832 /* Call F on X. */
2833 x = &XEXP (exp, n);
2834 result = (*f) (x, data);
2835 if (result == -1)
2836 /* Do not traverse sub-expressions. */
2837 continue;
2838 else if (result != 0)
2839 /* Stop the traversal. */
2840 return result;
2841
2842 if (*x == NULL_RTX)
2843 /* There are no sub-expressions. */
2844 continue;
2845
2846 i = non_rtx_starting_operands[GET_CODE (*x)];
2847 if (i >= 0)
2848 {
2849 result = for_each_rtx_1 (*x, i, f, data);
2850 if (result != 0)
2851 return result;
2852 }
2853 break;
2854
2855 case 'V':
2856 case 'E':
2857 if (XVEC (exp, n) == 0)
2858 continue;
2859 for (j = 0; j < XVECLEN (exp, n); ++j)
2860 {
2861 /* Call F on X. */
2862 x = &XVECEXP (exp, n, j);
2863 result = (*f) (x, data);
2864 if (result == -1)
2865 /* Do not traverse sub-expressions. */
2866 continue;
2867 else if (result != 0)
2868 /* Stop the traversal. */
2869 return result;
2870
2871 if (*x == NULL_RTX)
2872 /* There are no sub-expressions. */
2873 continue;
2874
2875 i = non_rtx_starting_operands[GET_CODE (*x)];
2876 if (i >= 0)
2877 {
2878 result = for_each_rtx_1 (*x, i, f, data);
2879 if (result != 0)
2880 return result;
2881 }
2882 }
2883 break;
2884
2885 default:
2886 /* Nothing to do. */
2887 break;
2888 }
2889 }
2890
2891 return 0;
2892 }
2893
2894 /* Traverse X via depth-first search, calling F for each
2895 sub-expression (including X itself). F is also passed the DATA.
2896 If F returns -1, do not traverse sub-expressions, but continue
2897 traversing the rest of the tree. If F ever returns any other
2898 nonzero value, stop the traversal, and return the value returned
2899 by F. Otherwise, return 0. This function does not traverse inside
2900 tree structure that contains RTX_EXPRs, or into sub-expressions
2901 whose format code is `0' since it is not known whether or not those
2902 codes are actually RTL.
2903
2904 This routine is very general, and could (should?) be used to
2905 implement many of the other routines in this file. */
2906
2907 int
2908 for_each_rtx (rtx *x, rtx_function f, void *data)
2909 {
2910 int result;
2911 int i;
2912
2913 /* Call F on X. */
2914 result = (*f) (x, data);
2915 if (result == -1)
2916 /* Do not traverse sub-expressions. */
2917 return 0;
2918 else if (result != 0)
2919 /* Stop the traversal. */
2920 return result;
2921
2922 if (*x == NULL_RTX)
2923 /* There are no sub-expressions. */
2924 return 0;
2925
2926 i = non_rtx_starting_operands[GET_CODE (*x)];
2927 if (i < 0)
2928 return 0;
2929
2930 return for_each_rtx_1 (*x, i, f, data);
2931 }
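
/* A minimal usage sketch (hypothetical callback, added for exposition):

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (MEM_P (*x))
         ++*(int *) data;
       return 0;
     }

     int n = 0;
     for_each_rtx (&PATTERN (insn), count_mems_1, &n);

   Returning 0 continues the traversal, returning -1 skips the
   sub-expressions of the current expression, and any other nonzero
   value stops the walk and is propagated back by for_each_rtx.  */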
2932
2933 \f
2934
2935 /* Data structure that holds the internal state communicated between
2936 for_each_inc_dec, for_each_inc_dec_find_mem and
2937 for_each_inc_dec_find_inc_dec. */
2938
2939 struct for_each_inc_dec_ops {
2940 /* The function to be called for each autoinc operation found. */
2941 for_each_inc_dec_fn fn;
2942 /* The opaque argument to be passed to it. */
2943 void *arg;
2944 /* The MEM we're visiting, if any. */
2945 rtx mem;
2946 };
2947
2948 static int for_each_inc_dec_find_mem (rtx *r, void *d);
2949
2950 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
2951 operands of the equivalent add insn and pass the result to the
2952 operator specified by *D. */
2953
2954 static int
2955 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
2956 {
2957 rtx x = *r;
2958 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
2959
2960 switch (GET_CODE (x))
2961 {
2962 case PRE_INC:
2963 case POST_INC:
2964 {
2965 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2966 rtx r1 = XEXP (x, 0);
2967 rtx c = gen_int_mode (size, GET_MODE (r1));
2968 return data->fn (data->mem, x, r1, r1, c, data->arg);
2969 }
2970
2971 case PRE_DEC:
2972 case POST_DEC:
2973 {
2974 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2975 rtx r1 = XEXP (x, 0);
2976 rtx c = gen_int_mode (-size, GET_MODE (r1));
2977 return data->fn (data->mem, x, r1, r1, c, data->arg);
2978 }
2979
2980 case PRE_MODIFY:
2981 case POST_MODIFY:
2982 {
2983 rtx r1 = XEXP (x, 0);
2984 rtx add = XEXP (x, 1);
2985 return data->fn (data->mem, x, r1, add, NULL, data->arg);
2986 }
2987
2988 case MEM:
2989 {
2990 rtx save = data->mem;
2991 int ret = for_each_inc_dec_find_mem (r, d);
2992 data->mem = save;
2993 return ret;
2994 }
2995
2996 default:
2997 return 0;
2998 }
2999 }
3000
3001 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3002 address, extract the operands of the equivalent add insn and pass
3003 the result to the operator specified by *D. */
3004
3005 static int
3006 for_each_inc_dec_find_mem (rtx *r, void *d)
3007 {
3008 rtx x = *r;
3009 if (x != NULL_RTX && MEM_P (x))
3010 {
3011 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3012 int result;
3013
3014 data->mem = x;
3015
3016 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3017 data);
3018 if (result)
3019 return result;
3020
3021 return -1;
3022 }
3023 return 0;
3024 }
3025
3026 /* Traverse *X looking for MEMs, and for autoinc operations within
3027 them. For each such autoinc operation found, call FN, passing it
3028 the innermost enclosing MEM, the operation itself, the RTX modified
3029 by the operation, two RTXs (the second may be NULL) that, once
3030 added, represent the value to be held by the modified RTX
3031 afterwards, and ARG. FN is to return -1 to skip looking for other
3032 autoinc operations within the visited operation, 0 to continue the
3033 traversal, or any other value to have it returned to the caller of
3034 for_each_inc_dec. */
3035
3036 int
3037 for_each_inc_dec (rtx *x,
3038 for_each_inc_dec_fn fn,
3039 void *arg)
3040 {
3041 struct for_each_inc_dec_ops data;
3042
3043 data.fn = fn;
3044 data.arg = arg;
3045 data.mem = NULL;
3046
3047 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3048 }
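
/* For example (illustrative; assumes SImode is 4 bytes wide): visiting

     (mem:SI (post_inc:P (reg:P r)))

   calls FN with the MEM, the post_inc rtx, r, r and (const_int 4),
   i.e. the side effect is presented as the equivalent "r = r + 4".  */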
3049
3050 \f
3051 /* Searches X for any reference to REGNO, returning the rtx of the
3052 reference found if any. Otherwise, returns NULL_RTX. */
3053
3054 rtx
3055 regno_use_in (unsigned int regno, rtx x)
3056 {
3057 const char *fmt;
3058 int i, j;
3059 rtx tem;
3060
3061 if (REG_P (x) && REGNO (x) == regno)
3062 return x;
3063
3064 fmt = GET_RTX_FORMAT (GET_CODE (x));
3065 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3066 {
3067 if (fmt[i] == 'e')
3068 {
3069 if ((tem = regno_use_in (regno, XEXP (x, i))))
3070 return tem;
3071 }
3072 else if (fmt[i] == 'E')
3073 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3074 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3075 return tem;
3076 }
3077
3078 return NULL_RTX;
3079 }
3080
3081 /* Return a value indicating whether OP, an operand of a commutative
3082 operation, is preferred as the first or second operand. The higher
3083 the value, the stronger the preference for being the first operand.
3084 We use negative values to indicate a preference for the second operand
3085 and positive values for the first operand. */
3086
3087 int
3088 commutative_operand_precedence (rtx op)
3089 {
3090 enum rtx_code code = GET_CODE (op);
3091
3092 /* Constants always become the second operand.  Prefer "nice" constants. */
3093 if (code == CONST_INT)
3094 return -8;
3095 if (code == CONST_DOUBLE)
3096 return -7;
3097 if (code == CONST_FIXED)
3098 return -7;
3099 op = avoid_constant_pool_reference (op);
3100 code = GET_CODE (op);
3101
3102 switch (GET_RTX_CLASS (code))
3103 {
3104 case RTX_CONST_OBJ:
3105 if (code == CONST_INT)
3106 return -6;
3107 if (code == CONST_DOUBLE)
3108 return -5;
3109 if (code == CONST_FIXED)
3110 return -5;
3111 return -4;
3112
3113 case RTX_EXTRA:
3114 /* SUBREGs of objects should come second. */
3115 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3116 return -3;
3117 return 0;
3118
3119 case RTX_OBJ:
3120 /* Complex expressions should come first, so decrease the priority
3121 of objects.  Prefer pointer objects over non-pointer objects. */
3122 if ((REG_P (op) && REG_POINTER (op))
3123 || (MEM_P (op) && MEM_POINTER (op)))
3124 return -1;
3125 return -2;
3126
3127 case RTX_COMM_ARITH:
3128 /* Prefer operands that are themselves commutative to be first.
3129 This helps to make things linear. In particular,
3130 (and (and (reg) (reg)) (not (reg))) is canonical. */
3131 return 4;
3132
3133 case RTX_BIN_ARITH:
3134 /* If only one operand is a binary expression, it will be the first
3135 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3136 is canonical, although it will usually be further simplified. */
3137 return 2;
3138
3139 case RTX_UNARY:
3140 /* Then prefer NEG and NOT. */
3141 if (code == NEG || code == NOT)
3142 return 1;
3143
3144 default:
3145 return 0;
3146 }
3147 }
3148
3149 /* Return 1 iff it is necessary to swap the operands of a commutative
3150 operation in order to canonicalize the expression. */
3151
3152 bool
3153 swap_commutative_operands_p (rtx x, rtx y)
3154 {
3155 return (commutative_operand_precedence (x)
3156 < commutative_operand_precedence (y));
3157 }
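
/* For example, commutative_operand_precedence gives a CONST_INT -8 and
   a non-pointer REG -2, so swap_commutative_operands_p (const1_rtx, reg)
   is true and canonicalization rewrites (plus (const_int 1) (reg)) as
   (plus (reg) (const_int 1)).  */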
3158
3159 /* Return 1 if X is an autoincrement side effect and the register is
3160 not the stack pointer. */
3161 int
3162 auto_inc_p (const_rtx x)
3163 {
3164 switch (GET_CODE (x))
3165 {
3166 case PRE_INC:
3167 case POST_INC:
3168 case PRE_DEC:
3169 case POST_DEC:
3170 case PRE_MODIFY:
3171 case POST_MODIFY:
3172 /* There are no REG_INC notes for SP. */
3173 if (XEXP (x, 0) != stack_pointer_rtx)
3174 return 1;
3175 default:
3176 break;
3177 }
3178 return 0;
3179 }
3180
3181 /* Return nonzero if IN contains a piece of rtl whose address is LOC. */
3182 int
3183 loc_mentioned_in_p (rtx *loc, const_rtx in)
3184 {
3185 enum rtx_code code;
3186 const char *fmt;
3187 int i, j;
3188
3189 if (!in)
3190 return 0;
3191
3192 code = GET_CODE (in);
3193 fmt = GET_RTX_FORMAT (code);
3194 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3195 {
3196 if (fmt[i] == 'e')
3197 {
3198 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3199 return 1;
3200 }
3201 else if (fmt[i] == 'E')
3202 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3203 if (loc == &XVECEXP (in, i, j)
3204 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3205 return 1;
3206 }
3207 return 0;
3208 }
3209
3210 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3211 and SUBREG_BYTE, return the bit offset where the subreg begins
3212 (counting from the least significant bit of the operand). */
3213
3214 unsigned int
3215 subreg_lsb_1 (enum machine_mode outer_mode,
3216 enum machine_mode inner_mode,
3217 unsigned int subreg_byte)
3218 {
3219 unsigned int bitpos;
3220 unsigned int byte;
3221 unsigned int word;
3222
3223 /* A paradoxical subreg begins at bit position 0. */
3224 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3225 return 0;
3226
3227 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3228 /* If the subreg crosses a word boundary ensure that
3229 it also begins and ends on a word boundary. */
3230 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3231 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3232 && (subreg_byte % UNITS_PER_WORD
3233 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3234
3235 if (WORDS_BIG_ENDIAN)
3236 word = (GET_MODE_SIZE (inner_mode)
3237 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3238 else
3239 word = subreg_byte / UNITS_PER_WORD;
3240 bitpos = word * BITS_PER_WORD;
3241
3242 if (BYTES_BIG_ENDIAN)
3243 byte = (GET_MODE_SIZE (inner_mode)
3244 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3245 else
3246 byte = subreg_byte % UNITS_PER_WORD;
3247 bitpos += byte * BITS_PER_UNIT;
3248
3249 return bitpos;
3250 }
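
/* Worked example (assuming a little-endian target with UNITS_PER_WORD
   == 4 and BITS_PER_UNIT == 8): for (subreg:HI (reg:DI r) 2), word
   = 2 / 4 = 0 and byte = 2 % 4 = 2, so the subreg starts at bit 16 of
   the DImode value.  */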
3251
3252 /* Given a subreg X, return the bit offset where the subreg begins
3253 (counting from the least significant bit of the reg). */
3254
3255 unsigned int
3256 subreg_lsb (const_rtx x)
3257 {
3258 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3259 SUBREG_BYTE (x));
3260 }
3261
3262 /* Fill in information about a subreg of a hard register.
3263 xregno - A regno of an inner hard subreg_reg (or what will become one).
3264 xmode - The mode of xregno.
3265 offset - The byte offset.
3266 ymode - The mode of a top level SUBREG (or what may become one).
3267 info - Pointer to structure to fill in. */
3268 void
3269 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3270 unsigned int offset, enum machine_mode ymode,
3271 struct subreg_info *info)
3272 {
3273 int nregs_xmode, nregs_ymode;
3274 int mode_multiple, nregs_multiple;
3275 int offset_adj, y_offset, y_offset_adj;
3276 int regsize_xmode, regsize_ymode;
3277 bool rknown;
3278
3279 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3280
3281 rknown = false;
3282
3283 /* If there are holes in a non-scalar mode in registers, we expect
3284 that it is made up of its units concatenated together. */
3285 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3286 {
3287 enum machine_mode xmode_unit;
3288
3289 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3290 if (GET_MODE_INNER (xmode) == VOIDmode)
3291 xmode_unit = xmode;
3292 else
3293 xmode_unit = GET_MODE_INNER (xmode);
3294 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3295 gcc_assert (nregs_xmode
3296 == (GET_MODE_NUNITS (xmode)
3297 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3298 gcc_assert (hard_regno_nregs[xregno][xmode]
3299 == (hard_regno_nregs[xregno][xmode_unit]
3300 * GET_MODE_NUNITS (xmode)));
3301
3302 /* You can only ask for a SUBREG of a value with holes in the middle
3303 if you don't cross the holes. (Such a SUBREG should be done by
3304 picking a different register class, or doing it in memory if
3305 necessary.) An example of a value with holes is XCmode on 32-bit
3306 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3307 3 for each part, but in memory it's two 128-bit parts.
3308 Padding is assumed to be at the end (not necessarily the 'high part')
3309 of each unit. */
3310 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3311 < GET_MODE_NUNITS (xmode))
3312 && (offset / GET_MODE_SIZE (xmode_unit)
3313 != ((offset + GET_MODE_SIZE (ymode) - 1)
3314 / GET_MODE_SIZE (xmode_unit))))
3315 {
3316 info->representable_p = false;
3317 rknown = true;
3318 }
3319 }
3320 else
3321 nregs_xmode = hard_regno_nregs[xregno][xmode];
3322
3323 nregs_ymode = hard_regno_nregs[xregno][ymode];
3324
3325 /* Paradoxical subregs are otherwise valid. */
3326 if (!rknown
3327 && offset == 0
3328 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3329 {
3330 info->representable_p = true;
3331 /* If this is a big endian paradoxical subreg, which uses more
3332 actual hard registers than the original register, we must
3333 return a negative offset so that we find the proper highpart
3334 of the register. */
3335 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3336 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3337 info->offset = nregs_xmode - nregs_ymode;
3338 else
3339 info->offset = 0;
3340 info->nregs = nregs_ymode;
3341 return;
3342 }
3343
3344 /* If registers store different numbers of bits in the different
3345 modes, we cannot generally form this subreg. */
3346 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3347 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3348 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3349 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3350 {
3351 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3352 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3353 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3354 {
3355 info->representable_p = false;
3356 info->nregs
3357 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3358 info->offset = offset / regsize_xmode;
3359 return;
3360 }
3361 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3362 {
3363 info->representable_p = false;
3364 info->nregs
3365 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3366 info->offset = offset / regsize_xmode;
3367 return;
3368 }
3369 }
3370
3371 /* Lowpart subregs are otherwise valid. */
3372 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3373 {
3374 info->representable_p = true;
3375 rknown = true;
3376
3377 if (offset == 0 || nregs_xmode == nregs_ymode)
3378 {
3379 info->offset = 0;
3380 info->nregs = nregs_ymode;
3381 return;
3382 }
3383 }
3384
3385 /* This should always pass, otherwise we don't know how to verify
3386 the constraint. These conditions may be relaxed but
3387 subreg_regno_offset would need to be redesigned. */
3388 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3389 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3390
3391 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3392 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3393 {
3394 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3395 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3396 HOST_WIDE_INT off_low = offset & (ysize - 1);
3397 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3398 offset = (xsize - ysize - off_high) | off_low;
3399 }
3400 /* The XMODE value can be seen as a vector of NREGS_XMODE
3401 values.  The subreg must represent the lowpart of a given field.
3402 Compute what field it is. */
3403 offset_adj = offset;
3404 offset_adj -= subreg_lowpart_offset (ymode,
3405 mode_for_size (GET_MODE_BITSIZE (xmode)
3406 / nregs_xmode,
3407 MODE_INT, 0));
3408
3409 /* Size of ymode must not be greater than the size of xmode. */
3410 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3411 gcc_assert (mode_multiple != 0);
3412
3413 y_offset = offset / GET_MODE_SIZE (ymode);
3414 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3415 nregs_multiple = nregs_xmode / nregs_ymode;
3416
3417 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3418 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3419
3420 if (!rknown)
3421 {
3422 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3423 rknown = true;
3424 }
3425 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3426 info->nregs = nregs_ymode;
3427 }
3428
3429 /* This function returns the regno offset of a subreg expression.
3430 xregno - A regno of an inner hard subreg_reg (or what will become one).
3431 xmode - The mode of xregno.
3432 offset - The byte offset.
3433 ymode - The mode of a top level SUBREG (or what may become one).
3434 RETURN - The regno offset which would be used. */
3435 unsigned int
3436 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3437 unsigned int offset, enum machine_mode ymode)
3438 {
3439 struct subreg_info info;
3440 subreg_get_info (xregno, xmode, offset, ymode, &info);
3441 return info.offset;
3442 }
3443
3444 /* This function returns true when the offset is representable via
3445 subreg_offset in the given regno.
3446 xregno - A regno of an inner hard subreg_reg (or what will become one).
3447 xmode - The mode of xregno.
3448 offset - The byte offset.
3449 ymode - The mode of a top level SUBREG (or what may become one).
3450 RETURN - Whether the offset is representable. */
3451 bool
3452 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3453 unsigned int offset, enum machine_mode ymode)
3454 {
3455 struct subreg_info info;
3456 subreg_get_info (xregno, xmode, offset, ymode, &info);
3457 return info.representable_p;
3458 }
3459
3460 /* Return the number of a YMODE register to which
3461
3462 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3463
3464 can be simplified. Return -1 if the subreg can't be simplified.
3465
3466 XREGNO is a hard register number. */
3467
3468 int
3469 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3470 unsigned int offset, enum machine_mode ymode)
3471 {
3472 struct subreg_info info;
3473 unsigned int yregno;
3474
3475 #ifdef CANNOT_CHANGE_MODE_CLASS
3476 /* Give the backend a chance to disallow the mode change. */
3477 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3478 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3479 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3480 /* We can use mode change in LRA for some transformations. */
3481 && ! lra_in_progress)
3482 return -1;
3483 #endif
3484
3485 /* We shouldn't simplify stack-related registers. */
3486 if ((!reload_completed || frame_pointer_needed)
3487 && xregno == FRAME_POINTER_REGNUM)
3488 return -1;
3489
3490 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3491 && xregno == ARG_POINTER_REGNUM)
3492 return -1;
3493
3494 if (xregno == STACK_POINTER_REGNUM
3495 /* We should convert the hard stack register in LRA if it is
3496 possible. */
3497 && ! lra_in_progress)
3498 return -1;
3499
3500 /* Try to get the register offset. */
3501 subreg_get_info (xregno, xmode, offset, ymode, &info);
3502 if (!info.representable_p)
3503 return -1;
3504
3505 /* Make sure that the offsetted register value is in range. */
3506 yregno = xregno + info.offset;
3507 if (!HARD_REGISTER_NUM_P (yregno))
3508 return -1;
3509
3510 /* See whether (reg:YMODE YREGNO) is valid.
3511
3512 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3513 This is a kludge to work around how complex FP arguments are passed
3514 on IA-64 and should be fixed. See PR target/49226. */
3515 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3516 && HARD_REGNO_MODE_OK (xregno, xmode))
3517 return -1;
3518
3519 return (int) yregno;
3520 }
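
/* For example (illustrative; assumes a 32-bit little-endian target
   where a DImode value occupies two SImode hard registers):
   (subreg:SI (reg:DI 10) 0) simplifies to register 10 and
   (subreg:SI (reg:DI 10) 4) to register 11, provided the target allows
   SImode in those registers.  */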
3521
3522 /* Return the final regno that a subreg expression refers to. */
3523 unsigned int
3524 subreg_regno (const_rtx x)
3525 {
3526 unsigned int ret;
3527 rtx subreg = SUBREG_REG (x);
3528 int regno = REGNO (subreg);
3529
3530 ret = regno + subreg_regno_offset (regno,
3531 GET_MODE (subreg),
3532 SUBREG_BYTE (x),
3533 GET_MODE (x));
3534 return ret;
3535
3536 }
3537
3538 /* Return the number of registers that a subreg expression refers
3539 to. */
3540 unsigned int
3541 subreg_nregs (const_rtx x)
3542 {
3543 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3544 }
3545
3546 /* Return the number of registers that a subreg expression with register
3547 number REGNO refers to.  This is a copy of subreg_nregs above,
3548 changed so that the regno can be passed in. */
3549
3550 unsigned int
3551 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3552 {
3553 struct subreg_info info;
3554 rtx subreg = SUBREG_REG (x);
3555
3556 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3557 &info);
3558 return info.nregs;
3559 }
3560
3561
3562 struct parms_set_data
3563 {
3564 int nregs;
3565 HARD_REG_SET regs;
3566 };
3567
3568 /* Helper function for noticing stores to parameter registers. */
3569 static void
3570 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3571 {
3572 struct parms_set_data *const d = (struct parms_set_data *) data;
3573 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3574 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3575 {
3576 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3577 d->nregs--;
3578 }
3579 }
3580
3581 /* Look backward for the first parameter to be loaded.
3582 Note that loads of all parameters will not necessarily be
3583 found if CSE has eliminated some of them (e.g., an argument
3584 to the outer function is passed down as a parameter).
3585 Do not skip BOUNDARY. */
3586 rtx
3587 find_first_parameter_load (rtx call_insn, rtx boundary)
3588 {
3589 struct parms_set_data parm;
3590 rtx p, before, first_set;
3591
3592 /* Since different machines initialize their parameter registers
3593 in different orders, assume nothing. Collect the set of all
3594 parameter registers. */
3595 CLEAR_HARD_REG_SET (parm.regs);
3596 parm.nregs = 0;
3597 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3598 if (GET_CODE (XEXP (p, 0)) == USE
3599 && REG_P (XEXP (XEXP (p, 0), 0)))
3600 {
3601 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3602
3603 /* We only care about registers which can hold function
3604 arguments. */
3605 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3606 continue;
3607
3608 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3609 parm.nregs++;
3610 }
3611 before = call_insn;
3612 first_set = call_insn;
3613
3614 /* Search backward for the first set of a register in this set. */
3615 while (parm.nregs && before != boundary)
3616 {
3617 before = PREV_INSN (before);
3618
3619 /* It is possible that some loads got CSEed from one call to
3620 another. Stop in that case. */
3621 if (CALL_P (before))
3622 break;
3623
3624 /* Our caller must either ensure that we will find all sets
3625 (in case the code has not been optimized yet), or take care
3626 of possible labels by setting BOUNDARY to the preceding
3627 CODE_LABEL. */
3628 if (LABEL_P (before))
3629 {
3630 gcc_assert (before == boundary);
3631 break;
3632 }
3633
3634 if (INSN_P (before))
3635 {
3636 int nregs_old = parm.nregs;
3637 note_stores (PATTERN (before), parms_set, &parm);
3638 /* If we found something that did not set a parameter reg,
3639 we're done. Do not keep going, as that might result
3640 in hoisting an insn before the setting of a pseudo
3641 that is used by the hoisted insn. */
3642 if (nregs_old != parm.nregs)
3643 first_set = before;
3644 else
3645 break;
3646 }
3647 }
3648 return first_set;
3649 }
3650
3651 /* Return true if we should avoid inserting code between INSN and preceding
3652 call instruction. */
3653
3654 bool
3655 keep_with_call_p (const_rtx insn)
3656 {
3657 rtx set;
3658
3659 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3660 {
3661 if (REG_P (SET_DEST (set))
3662 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3663 && fixed_regs[REGNO (SET_DEST (set))]
3664 && general_operand (SET_SRC (set), VOIDmode))
3665 return true;
3666 if (REG_P (SET_SRC (set))
3667 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3668 && REG_P (SET_DEST (set))
3669 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3670 return true;
3671 /* There may be a stack pop just after the call and before the store
3672 of the return register. Search for the actual store when deciding
3673 if we can break or not. */
3674 if (SET_DEST (set) == stack_pointer_rtx)
3675 {
3676 /* This CONST_CAST is okay because next_nonnote_insn just
3677 returns its argument and we assign it to a const_rtx
3678 variable. */
3679 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3680 if (i2 && keep_with_call_p (i2))
3681 return true;
3682 }
3683 }
3684 return false;
3685 }
3686
3687 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3688 to non-complex jumps. That is, direct unconditional, conditional,
3689 and tablejumps, but not computed jumps or returns. It also does
3690 not apply to the fallthru case of a conditional jump. */
3691
3692 bool
3693 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3694 {
3695 rtx tmp = JUMP_LABEL (jump_insn);
3696
3697 if (label == tmp)
3698 return true;
3699
3700 if (tablejump_p (jump_insn, NULL, &tmp))
3701 {
3702 rtvec vec = XVEC (PATTERN (tmp),
3703 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3704 int i, veclen = GET_NUM_ELEM (vec);
3705
3706 for (i = 0; i < veclen; ++i)
3707 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3708 return true;
3709 }
3710
3711 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3712 return true;
3713
3714 return false;
3715 }
3716
3717 \f
3718 /* Return an estimate of the cost of computing rtx X.
3719 One use is in cse, to decide which expression to keep in the hash table.
3720 Another is in rtl generation, to pick the cheapest way to multiply.
3721 Other uses like the latter are expected in the future.
3722
3723 X appears as operand OPNO in an expression with code OUTER_CODE.
3724 SPEED specifies whether costs optimized for speed or size should
3725 be returned. */
3726
3727 int
3728 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3729 {
3730 int i, j;
3731 enum rtx_code code;
3732 const char *fmt;
3733 int total;
3734 int factor;
3735
3736 if (x == 0)
3737 return 0;
3738
3739 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3740 many insns, taking N times as long. */
3741 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3742 if (factor == 0)
3743 factor = 1;
3744
3745 /* Compute the default costs of certain things.
3746 Note that targetm.rtx_costs can override the defaults. */
3747
3748 code = GET_CODE (x);
3749 switch (code)
3750 {
3751 case MULT:
3752 /* Multiplication has time-complexity O(N*N), where N is the
3753 number of units (translated from digits) when using
3754 schoolbook long multiplication. */
3755 total = factor * factor * COSTS_N_INSNS (5);
3756 break;
3757 case DIV:
3758 case UDIV:
3759 case MOD:
3760 case UMOD:
3761 /* Similarly, complexity for schoolbook long division. */
3762 total = factor * factor * COSTS_N_INSNS (7);
3763 break;
3764 case USE:
3765 /* Used in combine.c as a marker. */
3766 total = 0;
3767 break;
3768 case SET:
3769 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3770 the mode for the factor. */
3771 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3772 if (factor == 0)
3773 factor = 1;
3774 /* Fall through. */
3775 default:
3776 total = factor * COSTS_N_INSNS (1);
3777 }
3778
3779 switch (code)
3780 {
3781 case REG:
3782 return 0;
3783
3784 case SUBREG:
3785 total = 0;
3786 /* If we can't tie these modes, make this expensive. The larger
3787 the mode, the more expensive it is. */
3788 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3789 return COSTS_N_INSNS (2 + factor);
3790 break;
3791
3792 default:
3793 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3794 return total;
3795 break;
3796 }
3797
3798 /* Sum the costs of the sub-rtx's, plus the cost of this operation,
3799 which is already in TOTAL. */
3800
3801 fmt = GET_RTX_FORMAT (code);
3802 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3803 if (fmt[i] == 'e')
3804 total += rtx_cost (XEXP (x, i), code, i, speed);
3805 else if (fmt[i] == 'E')
3806 for (j = 0; j < XVECLEN (x, i); j++)
3807 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3808
3809 return total;
3810 }
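
/* For example, before any target adjustment via targetm.rtx_costs, a
   TImode multiplication on a 64-bit-word target has factor 2 and so
   costs 2 * 2 * COSTS_N_INSNS (5) = COSTS_N_INSNS (20), reflecting the
   quadratic behavior of schoolbook multiplication noted above.  */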
3811
3812 /* Fill in the structure C with information about both speed and size rtx
3813 costs for X, which is operand OPNO in an expression with code OUTER. */
3814
3815 void
3816 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3817 struct full_rtx_costs *c)
3818 {
3819 c->speed = rtx_cost (x, outer, opno, true);
3820 c->size = rtx_cost (x, outer, opno, false);
3821 }
3822
3823 \f
3824 /* Return the cost of address expression X.
3825 Expect that X is a properly formed address reference.
3826
3827 The SPEED parameter specifies whether costs optimized for speed or size
3828 should be returned. */
3829
3830 int
3831 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3832 {
3833 /* We may be asked for the cost of various unusual addresses, such as the
3834 operands of a push instruction.  It is not worthwhile to complicate the
3835 target hook with such cases. */
3836
3837 if (!memory_address_addr_space_p (mode, x, as))
3838 return 1000;
3839
3840 return targetm.address_cost (x, mode, as, speed);
3841 }
3842
3843 /* If the target doesn't override, compute the cost as with arithmetic. */
3844
3845 int
3846 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3847 {
3848 return rtx_cost (x, MEM, 0, speed);
3849 }
3850 \f
3851
3852 unsigned HOST_WIDE_INT
3853 nonzero_bits (const_rtx x, enum machine_mode mode)
3854 {
3855 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3856 }
3857
3858 unsigned int
3859 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3860 {
3861 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3862 }
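
/* For illustration: nonzero_bits ((and:SI (reg:SI r) (const_int 255)),
   SImode) is at most 255, since the AND clears the upper bits, and
   num_sign_bit_copies ((const_int -1), SImode) is 32 on a target with
   32-bit SImode, since every bit duplicates the sign bit.  */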
3863
3864 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3865 It avoids exponential behavior in nonzero_bits1 when X has
3866 identical subexpressions on the first or the second level. */
3867
3868 static unsigned HOST_WIDE_INT
3869 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3870 enum machine_mode known_mode,
3871 unsigned HOST_WIDE_INT known_ret)
3872 {
3873 if (x == known_x && mode == known_mode)
3874 return known_ret;
3875
3876 /* Try to find identical subexpressions. If found call
3877 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3878 precomputed value for the subexpression as KNOWN_RET. */
3879
3880 if (ARITHMETIC_P (x))
3881 {
3882 rtx x0 = XEXP (x, 0);
3883 rtx x1 = XEXP (x, 1);
3884
3885 /* Check the first level. */
3886 if (x0 == x1)
3887 return nonzero_bits1 (x, mode, x0, mode,
3888 cached_nonzero_bits (x0, mode, known_x,
3889 known_mode, known_ret));
3890
3891 /* Check the second level. */
3892 if (ARITHMETIC_P (x0)
3893 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3894 return nonzero_bits1 (x, mode, x1, mode,
3895 cached_nonzero_bits (x1, mode, known_x,
3896 known_mode, known_ret));
3897
3898 if (ARITHMETIC_P (x1)
3899 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3900 return nonzero_bits1 (x, mode, x0, mode,
3901 cached_nonzero_bits (x0, mode, known_x,
3902 known_mode, known_ret));
3903 }
3904
3905 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3906 }
3907
3908 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3909 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3910 is less useful. We can't allow both, because that results in exponential
3911 run time recursion. There is a nullstone testcase that triggered
3912 this. This macro avoids accidental uses of num_sign_bit_copies. */
3913 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3914
3915 /* Given an expression, X, compute which bits in X can be nonzero.
3916 We don't care about bits outside of those defined in MODE.
3917
3918 For most X this is simply GET_MODE_MASK (MODE), but if X is
3919 an arithmetic operation, we can do better. */
3920
3921 static unsigned HOST_WIDE_INT
3922 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3923 enum machine_mode known_mode,
3924 unsigned HOST_WIDE_INT known_ret)
3925 {
3926 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3927 unsigned HOST_WIDE_INT inner_nz;
3928 enum rtx_code code;
3929 enum machine_mode inner_mode;
3930 unsigned int mode_width = GET_MODE_PRECISION (mode);
3931
3932 /* For floating-point and vector values, assume all bits are needed. */
3933 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3934 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3935 return nonzero;
3936
3937 /* If X is wider than MODE, use its mode instead. */
3938 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
3939 {
3940 mode = GET_MODE (x);
3941 nonzero = GET_MODE_MASK (mode);
3942 mode_width = GET_MODE_PRECISION (mode);
3943 }
3944
3945 if (mode_width > HOST_BITS_PER_WIDE_INT)
3946 /* Our only callers in this case look for single bit values. So
3947 just return the mode mask. Those tests will then be false. */
3948 return nonzero;
3949
3950 #ifndef WORD_REGISTER_OPERATIONS
3951 /* If MODE is wider than X, but both are a single word for both the host
3952 and target machines, we can compute this from which bits of the
3953 object might be nonzero in its own mode, taking into account the fact
3954 that on many CISC machines, accessing an object in a wider mode
3955 causes the high-order bits to become undefined. So they are
3956 not known to be zero. */
3957
3958 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3959 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
3960 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3961 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
3962 {
3963 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3964 known_x, known_mode, known_ret);
3965 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3966 return nonzero;
3967 }
3968 #endif
3969
3970 code = GET_CODE (x);
3971 switch (code)
3972 {
3973 case REG:
3974 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3975 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3976 all the bits above ptr_mode are known to be zero. */
3977 /* As we do not know which address space the pointer is referring to,
3978 we can do this only if the target does not support different pointer
3979 or address modes depending on the address space. */
3980 if (target_default_pointer_address_modes_p ()
3981 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3982 && REG_POINTER (x))
3983 nonzero &= GET_MODE_MASK (ptr_mode);
3984 #endif
3985
3986 /* Include declared information about alignment of pointers. */
3987 /* ??? We don't properly preserve REG_POINTER changes across
3988 pointer-to-integer casts, so we can't trust it except for
3989 things that we know must be pointers. See execute/960116-1.c. */
3990 if ((x == stack_pointer_rtx
3991 || x == frame_pointer_rtx
3992 || x == arg_pointer_rtx)
3993 && REGNO_POINTER_ALIGN (REGNO (x)))
3994 {
3995 unsigned HOST_WIDE_INT alignment
3996 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3997
3998 #ifdef PUSH_ROUNDING
3999 /* If PUSH_ROUNDING is defined, it is possible for the
4000 stack to be momentarily aligned only to that amount,
4001 so we pick the least alignment. */
4002 if (x == stack_pointer_rtx && PUSH_ARGS)
4003 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4004 alignment);
4005 #endif
4006
4007 nonzero &= ~(alignment - 1);
4008 }
4009
4010 {
4011 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4012 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4013 known_mode, known_ret,
4014 &nonzero_for_hook);
4015
4016 if (new_rtx)
4017 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4018 known_mode, known_ret);
4019
4020 return nonzero_for_hook;
4021 }
4022
4023 case CONST_INT:
4024 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4025 /* If X is negative in MODE, sign-extend the value. */
4026 if (INTVAL (x) > 0
4027 && mode_width < BITS_PER_WORD
4028 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4029 != 0)
4030 return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
4031 #endif
4032
4033 return UINTVAL (x);
4034
4035 case MEM:
4036 #ifdef LOAD_EXTEND_OP
4037 /* In many, if not most, RISC machines, reading a byte from memory
4038 zeros the rest of the register. Noticing that fact saves a lot
4039 of extra zero-extends. */
4040 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4041 nonzero &= GET_MODE_MASK (GET_MODE (x));
4042 #endif
4043 break;
4044
4045 case EQ: case NE:
4046 case UNEQ: case LTGT:
4047 case GT: case GTU: case UNGT:
4048 case LT: case LTU: case UNLT:
4049 case GE: case GEU: case UNGE:
4050 case LE: case LEU: case UNLE:
4051 case UNORDERED: case ORDERED:
4052 /* If this produces an integer result, we know which bits are set.
4053 Code here used to clear bits outside the mode of X, but that is
4054 now done above. */
4055 /* Mind that MODE is the mode the caller wants to look at this
4056 operation in, and not the actual operation mode. We can wind
4057 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4058 that describes the results of a vector compare. */
4059 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4060 && mode_width <= HOST_BITS_PER_WIDE_INT)
4061 nonzero = STORE_FLAG_VALUE;
4062 break;
4063
4064 case NEG:
4065 #if 0
4066 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4067 and num_sign_bit_copies. */
4068 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4069 == GET_MODE_PRECISION (GET_MODE (x)))
4070 nonzero = 1;
4071 #endif
4072
4073 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4074 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4075 break;
4076
4077 case ABS:
4078 #if 0
4079 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4080 and num_sign_bit_copies. */
4081 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4082 == GET_MODE_PRECISION (GET_MODE (x)))
4083 nonzero = 1;
4084 #endif
4085 break;
4086
4087 case TRUNCATE:
4088 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4089 known_x, known_mode, known_ret)
4090 & GET_MODE_MASK (mode));
4091 break;
4092
4093 case ZERO_EXTEND:
4094 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4095 known_x, known_mode, known_ret);
4096 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4097 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4098 break;
4099
4100 case SIGN_EXTEND:
4101 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4102 Otherwise, mark every bit that is in the outer mode but not the
4103 inner mode as possibly nonzero. */
4104 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4105 known_x, known_mode, known_ret);
4106 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4107 {
4108 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4109 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4110 inner_nz |= (GET_MODE_MASK (mode)
4111 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4112 }
4113
4114 nonzero &= inner_nz;
4115 break;
4116
4117 case AND:
4118 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4119 known_x, known_mode, known_ret)
4120 & cached_nonzero_bits (XEXP (x, 1), mode,
4121 known_x, known_mode, known_ret);
4122 break;
4123
4124 case XOR: case IOR:
4125 case UMIN: case UMAX: case SMIN: case SMAX:
4126 {
4127 unsigned HOST_WIDE_INT nonzero0
4128 = cached_nonzero_bits (XEXP (x, 0), mode,
4129 known_x, known_mode, known_ret);
4130
4131 /* Don't call nonzero_bits for the second time if it cannot change
4132 anything. */
4133 if ((nonzero & nonzero0) != nonzero)
4134 nonzero &= nonzero0
4135 | cached_nonzero_bits (XEXP (x, 1), mode,
4136 known_x, known_mode, known_ret);
4137 }
4138 break;
4139
4140 case PLUS: case MINUS:
4141 case MULT:
4142 case DIV: case UDIV:
4143 case MOD: case UMOD:
4144 /* We can apply the rules of arithmetic to compute the number of
4145 high- and low-order zero bits of these operations. We start by
4146 computing the width (position of the highest-order nonzero bit)
4147 and the number of low-order zero bits for each value. */
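      /* A worked example with purely illustrative masks (not taken from
         any particular target): if the operands of a PLUS have
         nz0 = 0x0c (width0 = 4, low0 = 2) and nz1 = 0x30 (width1 = 6,
         low1 = 4), the sum fits in MAX (4, 6) + 1 = 7 bits and is a
         multiple of 1 << MIN (2, 4), so NONZERO below is narrowed to
         0x7c (assuming it started as the full mode mask).  */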
4148 {
4149 unsigned HOST_WIDE_INT nz0
4150 = cached_nonzero_bits (XEXP (x, 0), mode,
4151 known_x, known_mode, known_ret);
4152 unsigned HOST_WIDE_INT nz1
4153 = cached_nonzero_bits (XEXP (x, 1), mode,
4154 known_x, known_mode, known_ret);
4155 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4156 int width0 = floor_log2 (nz0) + 1;
4157 int width1 = floor_log2 (nz1) + 1;
4158 int low0 = floor_log2 (nz0 & -nz0);
4159 int low1 = floor_log2 (nz1 & -nz1);
4160 unsigned HOST_WIDE_INT op0_maybe_minusp
4161 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4162 unsigned HOST_WIDE_INT op1_maybe_minusp
4163 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4164 unsigned int result_width = mode_width;
4165 int result_low = 0;
4166
4167 switch (code)
4168 {
4169 case PLUS:
4170 result_width = MAX (width0, width1) + 1;
4171 result_low = MIN (low0, low1);
4172 break;
4173 case MINUS:
4174 result_low = MIN (low0, low1);
4175 break;
4176 case MULT:
4177 result_width = width0 + width1;
4178 result_low = low0 + low1;
4179 break;
4180 case DIV:
4181 if (width1 == 0)
4182 break;
4183 if (!op0_maybe_minusp && !op1_maybe_minusp)
4184 result_width = width0;
4185 break;
4186 case UDIV:
4187 if (width1 == 0)
4188 break;
4189 result_width = width0;
4190 break;
4191 case MOD:
4192 if (width1 == 0)
4193 break;
4194 if (!op0_maybe_minusp && !op1_maybe_minusp)
4195 result_width = MIN (width0, width1);
4196 result_low = MIN (low0, low1);
4197 break;
4198 case UMOD:
4199 if (width1 == 0)
4200 break;
4201 result_width = MIN (width0, width1);
4202 result_low = MIN (low0, low1);
4203 break;
4204 default:
4205 gcc_unreachable ();
4206 }
4207
4208 if (result_width < mode_width)
4209 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4210
4211 if (result_low > 0)
4212 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4213 }
4214 break;
4215
4216 case ZERO_EXTRACT:
4217 if (CONST_INT_P (XEXP (x, 1))
4218 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4219 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4220 break;
4221
4222 case SUBREG:
4223 /* If this is a SUBREG formed for a promoted variable that has
4224 been zero-extended, we know that at least the high-order bits
4225 are zero, though others might be too. */
4226
4227 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4228 nonzero = GET_MODE_MASK (GET_MODE (x))
4229 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4230 known_x, known_mode, known_ret);
4231
4232 inner_mode = GET_MODE (SUBREG_REG (x));
4233 /* If the inner mode is a single word for both the host and target
4234 machines, we can compute this from which bits of the inner
4235 object might be nonzero. */
4236 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4237 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4238 {
4239 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4240 known_x, known_mode, known_ret);
4241
4242 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4243 /* If this is a typical RISC machine, we only have to worry
4244 about the way loads are extended. */
4245 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4246 ? val_signbit_known_set_p (inner_mode, nonzero)
4247 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4248 || !MEM_P (SUBREG_REG (x)))
4249 #endif
4250 {
4251 /* On many CISC machines, accessing an object in a wider mode
4252 causes the high-order bits to become undefined. So they are
4253 not known to be zero. */
4254 if (GET_MODE_PRECISION (GET_MODE (x))
4255 > GET_MODE_PRECISION (inner_mode))
4256 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4257 & ~GET_MODE_MASK (inner_mode));
4258 }
4259 }
4260 break;
4261
4262 case ASHIFTRT:
4263 case LSHIFTRT:
4264 case ASHIFT:
4265 case ROTATE:
4266 /* The nonzero bits are in two classes: any bits within MODE
4267 that aren't in GET_MODE (x) are always significant. The rest of the
4268 nonzero bits are those that are significant in the operand of
4269 the shift when shifted the appropriate number of bits. This
4270 shows that high-order bits are cleared by the right shift and
4271 low-order bits by left shifts. */
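      /* Illustrative example (QImode, width 8): for (ashiftrt:QI X 2)
         with op_nonzero = 0x84, INNER becomes 0x84 >> 2 = 0x21; bit 5
         is the shifted copy of a possibly-set sign bit, so the two
         vacated high-order bits must also be marked, giving
         0x21 | 0xc0 = 0xe1.  */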
4272 if (CONST_INT_P (XEXP (x, 1))
4273 && INTVAL (XEXP (x, 1)) >= 0
4274 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4275 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4276 {
4277 enum machine_mode inner_mode = GET_MODE (x);
4278 unsigned int width = GET_MODE_PRECISION (inner_mode);
4279 int count = INTVAL (XEXP (x, 1));
4280 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4281 unsigned HOST_WIDE_INT op_nonzero
4282 = cached_nonzero_bits (XEXP (x, 0), mode,
4283 known_x, known_mode, known_ret);
4284 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4285 unsigned HOST_WIDE_INT outer = 0;
4286
4287 if (mode_width > width)
4288 outer = (op_nonzero & nonzero & ~mode_mask);
4289
4290 if (code == LSHIFTRT)
4291 inner >>= count;
4292 else if (code == ASHIFTRT)
4293 {
4294 inner >>= count;
4295
4296 /* If the sign bit may have been nonzero before the shift, we
4297 need to mark all the places it could have been copied to
4298 by the shift as possibly nonzero. */
4299 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4300 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4301 << (width - count);
4302 }
4303 else if (code == ASHIFT)
4304 inner <<= count;
4305 else
4306 inner = ((inner << (count % width)
4307 | (inner >> (width - (count % width)))) & mode_mask);
4308
4309 nonzero &= (outer | inner);
4310 }
4311 break;
4312
4313 case FFS:
4314 case POPCOUNT:
4315 /* This is at most the number of bits in the mode. */
4316 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4317 break;
4318
4319 case CLZ:
4320 /* If CLZ has a known value at zero, then the nonzero bits are
4321 that value, plus the number of bits in the mode minus one. */
4322 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4323 nonzero
4324 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4325 else
4326 nonzero = -1;
4327 break;
4328
4329 case CTZ:
4330 /* If CTZ has a known value at zero, then the nonzero bits are
4331 that value, plus the number of bits in the mode minus one. */
4332 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4333 nonzero
4334 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4335 else
4336 nonzero = -1;
4337 break;
4338
4339 case CLRSB:
4340 /* This is at most the number of bits in the mode minus 1. */
4341 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4342 break;
4343
4344 case PARITY:
4345 nonzero = 1;
4346 break;
4347
4348 case IF_THEN_ELSE:
4349 {
4350 unsigned HOST_WIDE_INT nonzero_true
4351 = cached_nonzero_bits (XEXP (x, 1), mode,
4352 known_x, known_mode, known_ret);
4353
4354 /* Don't call nonzero_bits for the second time if it cannot change
4355 anything. */
4356 if ((nonzero & nonzero_true) != nonzero)
4357 nonzero &= nonzero_true
4358 | cached_nonzero_bits (XEXP (x, 2), mode,
4359 known_x, known_mode, known_ret);
4360 }
4361 break;
4362
4363 default:
4364 break;
4365 }
4366
4367 return nonzero;
4368 }
4369
4370 /* See the macro definition above. */
4371 #undef cached_num_sign_bit_copies
4372
4373 \f
4374 /* The function cached_num_sign_bit_copies is a wrapper around
4375 num_sign_bit_copies1. It avoids exponential behavior in
4376 num_sign_bit_copies1 when X has identical subexpressions on the
4377 first or the second level. */
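/* For example, given (plus:SI (reg:SI A) (reg:SI A)), the count for
   (reg:SI A) is computed once and then passed down as KNOWN_X and
   KNOWN_RET, so the second occurrence is answered without recursing.  */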
4378
4379 static unsigned int
4380 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4381 enum machine_mode known_mode,
4382 unsigned int known_ret)
4383 {
4384 if (x == known_x && mode == known_mode)
4385 return known_ret;
4386
4387 /* Try to find identical subexpressions. If found call
4388 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4389 the precomputed value for the subexpression as KNOWN_RET. */
4390
4391 if (ARITHMETIC_P (x))
4392 {
4393 rtx x0 = XEXP (x, 0);
4394 rtx x1 = XEXP (x, 1);
4395
4396 /* Check the first level. */
4397 if (x0 == x1)
4398 return
4399 num_sign_bit_copies1 (x, mode, x0, mode,
4400 cached_num_sign_bit_copies (x0, mode, known_x,
4401 known_mode,
4402 known_ret));
4403
4404 /* Check the second level. */
4405 if (ARITHMETIC_P (x0)
4406 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4407 return
4408 num_sign_bit_copies1 (x, mode, x1, mode,
4409 cached_num_sign_bit_copies (x1, mode, known_x,
4410 known_mode,
4411 known_ret));
4412
4413 if (ARITHMETIC_P (x1)
4414 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4415 return
4416 num_sign_bit_copies1 (x, mode, x0, mode,
4417 cached_num_sign_bit_copies (x0, mode, known_x,
4418 known_mode,
4419 known_ret));
4420 }
4421
4422 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4423 }
4424
4425 /* Return the number of bits at the high-order end of X that are known to
4426 be equal to the sign bit. X will be used in mode MODE; if MODE is
4427 VOIDmode, X will be used in its own mode. The returned value will always
4428 be between 1 and the number of bits in MODE. */
4429
4430 static unsigned int
4431 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4432 enum machine_mode known_mode,
4433 unsigned int known_ret)
4434 {
4435 enum rtx_code code = GET_CODE (x);
4436 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4437 int num0, num1, result;
4438 unsigned HOST_WIDE_INT nonzero;
4439
4440 /* If we weren't given a mode, use the mode of X. If the mode is still
4441 VOIDmode, we don't know anything. Likewise if one of the modes is
4442 floating-point or a vector mode. */
4443
4444 if (mode == VOIDmode)
4445 mode = GET_MODE (x);
4446
4447 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4448 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4449 return 1;
4450
4451 /* For a smaller object, just ignore the high bits. */
4452 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4453 {
4454 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4455 known_x, known_mode, known_ret);
4456 return MAX (1,
4457 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4458 }
4459
4460 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4461 {
4462 #ifndef WORD_REGISTER_OPERATIONS
4463 /* If this machine does not do all register operations on the entire
4464 register and MODE is wider than the mode of X, we can say nothing
4465 at all about the high-order bits. */
4466 return 1;
4467 #else
4468 /* Likewise on machines that do, if the mode of the object is smaller
4469 than a word and loads of that size don't sign extend, we can say
4470 nothing about the high order bits. */
4471 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4472 #ifdef LOAD_EXTEND_OP
4473 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4474 #endif
4475 )
4476 return 1;
4477 #endif
4478 }
4479
4480 switch (code)
4481 {
4482 case REG:
4483
4484 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4485 /* If pointers extend signed and this is a pointer in Pmode, say that
4486 all the bits above ptr_mode are known to be sign bit copies. */
4487 /* As we do not know which address space the pointer is referring to,
4488 we can do this only if the target does not support different pointer
4489 or address modes depending on the address space. */
4490 if (target_default_pointer_address_modes_p ()
4491 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4492 && mode == Pmode && REG_POINTER (x))
4493 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4494 #endif
4495
4496 {
4497 unsigned int copies_for_hook = 1, copies = 1;
4498 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4499 known_mode, known_ret,
4500 &copies_for_hook);
4501
4502 if (new_rtx)
4503 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4504 known_mode, known_ret);
4505
4506 if (copies > 1 || copies_for_hook > 1)
4507 return MAX (copies, copies_for_hook);
4508
4509 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4510 }
4511 break;
4512
4513 case MEM:
4514 #ifdef LOAD_EXTEND_OP
4515 /* Some RISC machines sign-extend all loads of smaller than a word. */
4516 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4517 return MAX (1, ((int) bitwidth
4518 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4519 #endif
4520 break;
4521
4522 case CONST_INT:
4523 /* If the constant is negative, take its 1's complement and remask.
4524 Then see how many zero bits we have. */
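      /* Illustrative example (SImode, bitwidth 32): for X = -4,
         NONZERO becomes (~0xfffffffc) & 0xffffffff = 3,
         floor_log2 (3) = 1, and we return 32 - 1 - 1 = 30: the top 30
         bits of -4 are all copies of the sign bit.  */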
4525 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4526 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4527 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4528 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4529
4530 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4531
4532 case SUBREG:
4533 /* If this is a SUBREG for a promoted object that is sign-extended
4534 and we are looking at it in a wider mode, we know that at least the
4535 high-order bits are known to be sign bit copies. */
4536
4537 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4538 {
4539 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4540 known_x, known_mode, known_ret);
4541 return MAX ((int) bitwidth
4542 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4543 num0);
4544 }
4545
4546 /* For a smaller object, just ignore the high bits. */
4547 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4548 {
4549 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4550 known_x, known_mode, known_ret);
4551 return MAX (1, (num0
4552 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4553 - bitwidth)));
4554 }
4555
4556 #ifdef WORD_REGISTER_OPERATIONS
4557 #ifdef LOAD_EXTEND_OP
4558 /* For paradoxical SUBREGs on machines where all register operations
4559 affect the entire register, just look inside. Note that we are
4560 passing MODE to the recursive call, so the number of sign bit copies
4561 will remain relative to that mode, not the inner mode. */
4562
4563 /* This works only if loads sign extend. Otherwise, if we get a
4564 reload for the inner part, it may be loaded from the stack, and
4565 then we lose all sign bit copies that existed before the store
4566 to the stack. */
4567
4568 if (paradoxical_subreg_p (x)
4569 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4570 && MEM_P (SUBREG_REG (x)))
4571 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4572 known_x, known_mode, known_ret);
4573 #endif
4574 #endif
4575 break;
4576
4577 case SIGN_EXTRACT:
4578 if (CONST_INT_P (XEXP (x, 1)))
4579 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4580 break;
4581
4582 case SIGN_EXTEND:
4583 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4584 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4585 known_x, known_mode, known_ret));
4586
4587 case TRUNCATE:
4588 /* For a smaller object, just ignore the high bits. */
4589 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4590 known_x, known_mode, known_ret);
4591 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4592 - bitwidth)));
4593
4594 case NOT:
4595 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4596 known_x, known_mode, known_ret);
4597
4598 case ROTATE: case ROTATERT:
4599 /* If we are rotating left by a number of bits less than the number
4600 of sign bit copies, we can just subtract that amount from the
4601 number. */
4602 if (CONST_INT_P (XEXP (x, 1))
4603 && INTVAL (XEXP (x, 1)) >= 0
4604 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4605 {
4606 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4607 known_x, known_mode, known_ret);
4608 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4609 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4610 }
4611 break;
4612
4613 case NEG:
4614 /* In general, this subtracts one sign bit copy. But if the value
4615 is known to be positive, the number of sign bit copies is the
4616 same as that of the input. Finally, if the input has just one bit
4617 that might be nonzero, all the bits are copies of the sign bit. */
4618 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4619 known_x, known_mode, known_ret);
4620 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4621 return num0 > 1 ? num0 - 1 : 1;
4622
4623 nonzero = nonzero_bits (XEXP (x, 0), mode);
4624 if (nonzero == 1)
4625 return bitwidth;
4626
4627 if (num0 > 1
4628 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4629 num0--;
4630
4631 return num0;
4632
4633 case IOR: case AND: case XOR:
4634 case SMIN: case SMAX: case UMIN: case UMAX:
4635 /* Logical operations will preserve the number of sign-bit copies.
4636 MIN and MAX operations always return one of the operands. */
4637 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4638 known_x, known_mode, known_ret);
4639 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4640 known_x, known_mode, known_ret);
4641
4642 /* If num1 is clearing some of the top bits then regardless of
4643 the other term, we are guaranteed to have at least that many
4644 high-order zero bits. */
4645 if (code == AND
4646 && num1 > 1
4647 && bitwidth <= HOST_BITS_PER_WIDE_INT
4648 && CONST_INT_P (XEXP (x, 1))
4649 && (UINTVAL (XEXP (x, 1))
4650 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4651 return num1;
4652
4653 /* Similarly for IOR when setting high-order bits. */
4654 if (code == IOR
4655 && num1 > 1
4656 && bitwidth <= HOST_BITS_PER_WIDE_INT
4657 && CONST_INT_P (XEXP (x, 1))
4658 && (UINTVAL (XEXP (x, 1))
4659 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4660 return num1;
4661
4662 return MIN (num0, num1);
4663
4664 case PLUS: case MINUS:
4665 /* For addition and subtraction, we can have a 1-bit carry. However,
4666 if we are subtracting 1 from a positive number, there will not
4667 be such a carry. Furthermore, if the positive number is known to
4668 be 0 or 1, we know the result is either -1 or 0. */
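      /* Illustrative example: if both operands of a PLUS have 12 sign
         bit copies in a 32-bit mode, each value fits in 21 signed bits,
         the sum fits in 22, and MIN (12, 12) - 1 = 11 copies remain.  */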
4669
4670 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4671 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4672 {
4673 nonzero = nonzero_bits (XEXP (x, 0), mode);
4674 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4675 return (nonzero == 1 || nonzero == 0 ? bitwidth
4676 : bitwidth - floor_log2 (nonzero) - 1);
4677 }
4678
4679 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4680 known_x, known_mode, known_ret);
4681 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4682 known_x, known_mode, known_ret);
4683 result = MAX (1, MIN (num0, num1) - 1);
4684
4685 return result;
4686
4687 case MULT:
4688 /* The number of bits of the product is the sum of the number of
4689 bits of both terms. However, unless one of the terms is known
4690 to be positive, we must allow for an additional bit since negating
4691 a negative number can remove one sign bit copy. */
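      /* Illustrative example: in a 32-bit mode, num0 = num1 = 20 means
         each factor fits in 13 signed bits, so the product fits in 26
         and RESULT = 32 - 12 - 12 = 8; if both factors might be
         negative we drop one more copy, leaving 7.  */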
4692
4693 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4694 known_x, known_mode, known_ret);
4695 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4696 known_x, known_mode, known_ret);
4697
4698 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4699 if (result > 0
4700 && (bitwidth > HOST_BITS_PER_WIDE_INT
4701 || (((nonzero_bits (XEXP (x, 0), mode)
4702 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4703 && ((nonzero_bits (XEXP (x, 1), mode)
4704 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4705 != 0))))
4706 result--;
4707
4708 return MAX (1, result);
4709
4710 case UDIV:
4711 /* The result must be <= the first operand. If the first operand
4712 has the high bit set, we know nothing about the number of sign
4713 bit copies. */
4714 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4715 return 1;
4716 else if ((nonzero_bits (XEXP (x, 0), mode)
4717 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4718 return 1;
4719 else
4720 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4721 known_x, known_mode, known_ret);
4722
4723 case UMOD:
4724 /* The result must be <= the second operand. If the second operand
4725 has (or just might have) the high bit set, we know nothing about
4726 the number of sign bit copies. */
4727 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4728 return 1;
4729 else if ((nonzero_bits (XEXP (x, 1), mode)
4730 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4731 return 1;
4732 else
4733 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4734 known_x, known_mode, known_ret);
4735
4736 case DIV:
4737 /* Similar to unsigned division, except that we have to worry about
4738 the case where the divisor is negative, in which case we have
4739 to add 1. */
4740 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4741 known_x, known_mode, known_ret);
4742 if (result > 1
4743 && (bitwidth > HOST_BITS_PER_WIDE_INT
4744 || (nonzero_bits (XEXP (x, 1), mode)
4745 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4746 result--;
4747
4748 return result;
4749
4750 case MOD:
4751 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4752 known_x, known_mode, known_ret);
4753 if (result > 1
4754 && (bitwidth > HOST_BITS_PER_WIDE_INT
4755 || (nonzero_bits (XEXP (x, 1), mode)
4756 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4757 result--;
4758
4759 return result;
4760
4761 case ASHIFTRT:
4762 /* Shifts by a constant add to the number of bits equal to the
4763 sign bit. */
4764 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4765 known_x, known_mode, known_ret);
4766 if (CONST_INT_P (XEXP (x, 1))
4767 && INTVAL (XEXP (x, 1)) > 0
4768 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4769 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4770
4771 return num0;
4772
4773 case ASHIFT:
4774 /* Left shifts destroy copies. */
4775 if (!CONST_INT_P (XEXP (x, 1))
4776 || INTVAL (XEXP (x, 1)) < 0
4777 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4778 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4779 return 1;
4780
4781 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4782 known_x, known_mode, known_ret);
4783 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4784
4785 case IF_THEN_ELSE:
4786 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4787 known_x, known_mode, known_ret);
4788 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4789 known_x, known_mode, known_ret);
4790 return MIN (num0, num1);
4791
4792 case EQ: case NE: case GE: case GT: case LE: case LT:
4793 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4794 case GEU: case GTU: case LEU: case LTU:
4795 case UNORDERED: case ORDERED:
4796 /* If STORE_FLAG_VALUE is negative, take its 1's complement and
4797 remask. Then see how many zero bits we have. */
4798 nonzero = STORE_FLAG_VALUE;
4799 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4800 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4801 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4802
4803 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4804
4805 default:
4806 break;
4807 }
4808
4809 /* If we haven't been able to figure it out by one of the above rules,
4810 see if some of the high-order bits are known to be zero. If so,
4811 count those bits and return one less than that amount. If we can't
4812 safely compute the mask for this mode, always return 1. */
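  /* Illustrative example: with bitwidth 32 and NONZERO = 0xff,
     floor_log2 gives 7 and we return 32 - 7 - 1 = 24, since the value
     is known to be nonnegative with 24 leading zero bits.  */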
4813
4814 bitwidth = GET_MODE_PRECISION (mode);
4815 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4816 return 1;
4817
4818 nonzero = nonzero_bits (x, mode);
4819 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4820 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4821 }
4822
4823 /* Calculate the rtx_cost of a single instruction. A return value of
4824 zero indicates an instruction pattern without a known cost. */
4825
4826 int
4827 insn_rtx_cost (rtx pat, bool speed)
4828 {
4829 int i, cost;
4830 rtx set;
4831
4832 /* Extract the single set rtx from the instruction pattern.
4833 We can't use single_set since we only have the pattern. */
4834 if (GET_CODE (pat) == SET)
4835 set = pat;
4836 else if (GET_CODE (pat) == PARALLEL)
4837 {
4838 set = NULL_RTX;
4839 for (i = 0; i < XVECLEN (pat, 0); i++)
4840 {
4841 rtx x = XVECEXP (pat, 0, i);
4842 if (GET_CODE (x) == SET)
4843 {
4844 if (set)
4845 return 0;
4846 set = x;
4847 }
4848 }
4849 if (!set)
4850 return 0;
4851 }
4852 else
4853 return 0;
4854
4855 cost = set_src_cost (SET_SRC (set), speed);
4856 return cost > 0 ? cost : COSTS_N_INSNS (1);
4857 }
4858
4859 /* Given an insn INSN and condition COND, return the condition in a
4860 canonical form to simplify testing by callers. Specifically:
4861
4862 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4863 (2) Both operands will be machine operands; (cc0) will have been replaced.
4864 (3) If an operand is a constant, it will be the second operand.
4865 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4866 for GE, GEU, and LEU.
4867
4868 If the condition cannot be understood, or is an inequality floating-point
4869 comparison which needs to be reversed, 0 will be returned.
4870
4871 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4872
4873 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4874 insn used in locating the condition was found. If a replacement test
4875 of the condition is desired, it should be placed in front of that
4876 insn and we will be sure that the inputs are still valid.
4877
4878 If WANT_REG is nonzero, we wish the condition to be relative to that
4879 register, if possible. Therefore, do not canonicalize the condition
4880 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4881 to be a compare to a CC mode register.
4882
4883 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4884 and at INSN. */
4885
4886 rtx
4887 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4888 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4889 {
4890 enum rtx_code code;
4891 rtx prev = insn;
4892 const_rtx set;
4893 rtx tem;
4894 rtx op0, op1;
4895 int reverse_code = 0;
4896 enum machine_mode mode;
4897 basic_block bb = BLOCK_FOR_INSN (insn);
4898
4899 code = GET_CODE (cond);
4900 mode = GET_MODE (cond);
4901 op0 = XEXP (cond, 0);
4902 op1 = XEXP (cond, 1);
4903
4904 if (reverse)
4905 code = reversed_comparison_code (cond, insn);
4906 if (code == UNKNOWN)
4907 return 0;
4908
4909 if (earliest)
4910 *earliest = insn;
4911
4912 /* If we are comparing a register with zero, see if the register is set
4913 in the previous insn to a COMPARE or a comparison operation. Perform
4914 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4915 in cse.c */
4916
4917 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4918 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4919 && op1 == CONST0_RTX (GET_MODE (op0))
4920 && op0 != want_reg)
4921 {
4922 /* Set nonzero when we find something of interest. */
4923 rtx x = 0;
4924
4925 #ifdef HAVE_cc0
4926 /* If comparison with cc0, import actual comparison from compare
4927 insn. */
4928 if (op0 == cc0_rtx)
4929 {
4930 if ((prev = prev_nonnote_insn (prev)) == 0
4931 || !NONJUMP_INSN_P (prev)
4932 || (set = single_set (prev)) == 0
4933 || SET_DEST (set) != cc0_rtx)
4934 return 0;
4935
4936 op0 = SET_SRC (set);
4937 op1 = CONST0_RTX (GET_MODE (op0));
4938 if (earliest)
4939 *earliest = prev;
4940 }
4941 #endif
4942
4943 /* If this is a COMPARE, pick up the two things being compared. */
4944 if (GET_CODE (op0) == COMPARE)
4945 {
4946 op1 = XEXP (op0, 1);
4947 op0 = XEXP (op0, 0);
4948 continue;
4949 }
4950 else if (!REG_P (op0))
4951 break;
4952
4953 /* Go back to the previous insn. Stop if it is not an INSN. We also
4954 stop if it isn't a single set or if it has a REG_INC note because
4955 we don't want to bother dealing with it. */
4956
4957 prev = prev_nonnote_nondebug_insn (prev);
4958
4959 if (prev == 0
4960 || !NONJUMP_INSN_P (prev)
4961 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4962 /* In cfglayout mode, there do not have to be labels at the
4963 beginning of a block, or jumps at the end, so the previous
4964 conditions would not stop us when we reach bb boundary. */
4965 || BLOCK_FOR_INSN (prev) != bb)
4966 break;
4967
4968 set = set_of (op0, prev);
4969
4970 if (set
4971 && (GET_CODE (set) != SET
4972 || !rtx_equal_p (SET_DEST (set), op0)))
4973 break;
4974
4975 /* If this is setting OP0, get what it sets it to if it looks
4976 relevant. */
4977 if (set)
4978 {
4979 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4980 #ifdef FLOAT_STORE_FLAG_VALUE
4981 REAL_VALUE_TYPE fsfv;
4982 #endif
4983
4984 /* ??? We may not combine comparisons done in a CCmode with
4985 comparisons not done in a CCmode. This is to aid targets
4986 like Alpha that have an IEEE compliant EQ instruction, and
4987 a non-IEEE compliant BEQ instruction. The use of CCmode is
4988 actually artificial, simply to prevent the combination, but
4989 should not affect other platforms.
4990
4991 However, we must allow VOIDmode comparisons to match either
4992 CCmode or non-CCmode comparison, because some ports have
4993 modeless comparisons inside branch patterns.
4994
4995 ??? This mode check should perhaps look more like the mode check
4996 in simplify_comparison in combine. */
4997
4998 if ((GET_CODE (SET_SRC (set)) == COMPARE
4999 || (((code == NE
5000 || (code == LT
5001 && val_signbit_known_set_p (inner_mode,
5002 STORE_FLAG_VALUE))
5003 #ifdef FLOAT_STORE_FLAG_VALUE
5004 || (code == LT
5005 && SCALAR_FLOAT_MODE_P (inner_mode)
5006 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5007 REAL_VALUE_NEGATIVE (fsfv)))
5008 #endif
5009 ))
5010 && COMPARISON_P (SET_SRC (set))))
5011 && (((GET_MODE_CLASS (mode) == MODE_CC)
5012 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5013 || mode == VOIDmode || inner_mode == VOIDmode))
5014 x = SET_SRC (set);
5015 else if (((code == EQ
5016 || (code == GE
5017 && val_signbit_known_set_p (inner_mode,
5018 STORE_FLAG_VALUE))
5019 #ifdef FLOAT_STORE_FLAG_VALUE
5020 || (code == GE
5021 && SCALAR_FLOAT_MODE_P (inner_mode)
5022 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5023 REAL_VALUE_NEGATIVE (fsfv)))
5024 #endif
5025 ))
5026 && COMPARISON_P (SET_SRC (set))
5027 && (((GET_MODE_CLASS (mode) == MODE_CC)
5028 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5029 || mode == VOIDmode || inner_mode == VOIDmode))
5031 {
5032 reverse_code = 1;
5033 x = SET_SRC (set);
5034 }
5035 else
5036 break;
5037 }
5038
5039 else if (reg_set_p (op0, prev))
5040 /* If this sets OP0, but not directly, we have to give up. */
5041 break;
5042
5043 if (x)
5044 {
5045 /* If the caller is expecting the condition to be valid at INSN,
5046 make sure X doesn't change before INSN. */
5047 if (valid_at_insn_p)
5048 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5049 break;
5050 if (COMPARISON_P (x))
5051 code = GET_CODE (x);
5052 if (reverse_code)
5053 {
5054 code = reversed_comparison_code (x, prev);
5055 if (code == UNKNOWN)
5056 return 0;
5057 reverse_code = 0;
5058 }
5059
5060 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5061 if (earliest)
5062 *earliest = prev;
5063 }
5064 }
5065
5066 /* If constant is first, put it last. */
5067 if (CONSTANT_P (op0))
5068 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5069
5070 /* If OP0 is the result of a comparison, we weren't able to find what
5071 was really being compared, so fail. */
5072 if (!allow_cc_mode
5073 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5074 return 0;
5075
5076 /* Canonicalize any ordered comparison with integers involving equality
5077 if we can do computations in the relevant mode and we do not
5078 overflow. */
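  /* For example, (le X (const_int 4)) becomes (lt X (const_int 5)),
     and (geu X (const_int 1)) becomes (gtu X (const_int 0)), provided
     the adjusted constant does not overflow in the mode of X.  */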
5079
5080 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5081 && CONST_INT_P (op1)
5082 && GET_MODE (op0) != VOIDmode
5083 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5084 {
5085 HOST_WIDE_INT const_val = INTVAL (op1);
5086 unsigned HOST_WIDE_INT uconst_val = const_val;
5087 unsigned HOST_WIDE_INT max_val
5088 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5089
5090 switch (code)
5091 {
5092 case LE:
5093 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5094 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5095 break;
5096
5097 /* When cross-compiling, const_val might be sign-extended from
5098 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5099 case GE:
5100 if ((const_val & max_val)
5101 != ((unsigned HOST_WIDE_INT) 1
5102 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5103 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5104 break;
5105
5106 case LEU:
5107 if (uconst_val < max_val)
5108 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5109 break;
5110
5111 case GEU:
5112 if (uconst_val != 0)
5113 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5114 break;
5115
5116 default:
5117 break;
5118 }
5119 }
5120
5121 /* Never return CC0; return zero instead. */
5122 if (CC0_P (op0))
5123 return 0;
5124
5125 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5126 }
5127
5128 /* Given a jump insn JUMP, return the condition that will cause it to branch
5129 to its JUMP_LABEL. If the condition cannot be understood, or is an
5130 inequality floating-point comparison which needs to be reversed, 0 will
5131 be returned.
5132
5133 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5134 insn used in locating the condition was found. If a replacement test
5135 of the condition is desired, it should be placed in front of that
5136 insn and we will be sure that the inputs are still valid. If EARLIEST
5137 is null, the returned condition will be valid at INSN.
5138
5139 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5140 compare CC mode register.
5141
5142 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5143
5144 rtx
5145 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5146 {
5147 rtx cond;
5148 int reverse;
5149 rtx set;
5150
5151 /* If this is not a standard conditional jump, we can't parse it. */
5152 if (!JUMP_P (jump)
5153 || ! any_condjump_p (jump))
5154 return 0;
5155 set = pc_set (jump);
5156
5157 cond = XEXP (SET_SRC (set), 0);
5158
5159 /* If this branches to JUMP_LABEL when the condition is false, reverse
5160 the condition. */
5161 reverse
5162 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5163 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5164
5165 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5166 allow_cc_mode, valid_at_insn_p);
5167 }
5168
5169 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5170 TARGET_MODE_REP_EXTENDED.
5171
5172 Note that we assume that the property of
5173 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5174 narrower than mode B. I.e., if A is a mode narrower than B then in
5175 order to be able to operate on it in mode B, mode A needs to
5176 satisfy the requirements set by the representation of mode B. */
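/* For instance (illustrative, not target-specific): on a machine where
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, an SImode
   value held in a DImode register keeps its upper 32 bits as sign
   copies, so NUM_SIGN_BIT_COPIES_IN_REP[DImode][SImode] ends up 32.  */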
5177
5178 static void
5179 init_num_sign_bit_copies_in_rep (void)
5180 {
5181 enum machine_mode mode, in_mode;
5182
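  /* Note that the outer loop increment reads MODE rather than IN_MODE:
     the inner loop always terminates with MODE == IN_MODE, so this
     advances IN_MODE to the next wider integer mode.  */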
5183 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5184 in_mode = GET_MODE_WIDER_MODE (mode))
5185 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5186 mode = GET_MODE_WIDER_MODE (mode))
5187 {
5188 enum machine_mode i;
5189
5190 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5191 extends to the next widest mode. */
5192 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5193 || GET_MODE_WIDER_MODE (mode) == in_mode);
5194
5195 /* We are in in_mode. Count how many bits outside of mode
5196 have to be copies of the sign-bit. */
5197 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5198 {
5199 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5200
5201 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5202 /* We can only check sign-bit copies starting from the
5203 top-bit. In order to be able to check the bits we
5204 have already seen we pretend that subsequent bits
5205 have to be sign-bit copies too. */
5206 || num_sign_bit_copies_in_rep [in_mode][mode])
5207 num_sign_bit_copies_in_rep [in_mode][mode]
5208 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5209 }
5210 }
5211 }
5212
5213 /* Suppose that truncation from the machine mode of X to MODE is not a
5214 no-op. See if there is anything special about X so that we can
5215 assume it already contains a truncated value of MODE. */
5216
5217 bool
5218 truncated_to_mode (enum machine_mode mode, const_rtx x)
5219 {
5220 /* This register has already been used in MODE without explicit
5221 truncation. */
5222 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5223 return true;
5224
5225 /* See if we already satisfy the requirements of MODE. If yes we
5226 can just switch to MODE. */
5227 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5228 && (num_sign_bit_copies (x, GET_MODE (x))
5229 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5230 return true;
5231
5232 return false;
5233 }
5234 \f
5235 /* Initialize non_rtx_starting_operands, which is used to speed up
5236 for_each_rtx. */
5237 void
5238 init_rtlanal (void)
5239 {
5240 int i;
5241 for (i = 0; i < NUM_RTX_CODE; i++)
5242 {
5243 const char *format = GET_RTX_FORMAT (i);
5244 const char *first = strpbrk (format, "eEV");
5245 non_rtx_starting_operands[i] = first ? first - format : -1;
5246 }
5247
5248 init_num_sign_bit_copies_in_rep ();
5249 }
5250 \f
5251 /* Check whether this is a constant pool constant. */
5252 bool
5253 constant_pool_constant_p (rtx x)
5254 {
5255 x = avoid_constant_pool_reference (x);
5256 return CONST_DOUBLE_P (x);
5257 }
5258 \f
5259 /* If M is a bitmask that selects a field of low-order bits within an item but
5260 not the entire word, return the length of the field. Return -1 otherwise.
5261 M is used in machine mode MODE. */
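/* For example, 0x3f yields 6 and 0xff yields 8, while 0x18 yields -1
   because 0x18 + 1 is not a power of two.  */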
5262
5263 int
5264 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5265 {
5266 if (mode != VOIDmode)
5267 {
5268 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5269 return -1;
5270 m &= GET_MODE_MASK (mode);
5271 }
5272
5273 return exact_log2 (m + 1);
5274 }
5275
5276 /* Return the mode of MEM's address. */
5277
5278 enum machine_mode
5279 get_address_mode (rtx mem)
5280 {
5281 enum machine_mode mode;
5282
5283 gcc_assert (MEM_P (mem));
5284 mode = GET_MODE (XEXP (mem, 0));
5285 if (mode != VOIDmode)
5286 return mode;
5287 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5288 }
5289 \f
5290 /* Split up a CONST_DOUBLE or integer constant rtx
5291 into two rtx's for single words,
5292 storing in *FIRST the word that comes first in memory in the target
5293 and in *SECOND the other. */
5294
5295 void
5296 split_double (rtx value, rtx *first, rtx *second)
5297 {
5298 if (CONST_INT_P (value))
5299 {
5300 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5301 {
5302 /* In this case the CONST_INT holds both target words.
5303 Extract the bits from it into two word-sized pieces.
5304 Sign extend each half to HOST_WIDE_INT. */
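          /* Illustrative example: with a 64-bit HOST_WIDE_INT and
             32-bit words, 0x123456789abcdef0 splits into
             low = 0xffffffff9abcdef0 (bit 31 is set, so the low word is
             sign-extended on the host) and high = 0x12345678.  */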
5305 unsigned HOST_WIDE_INT low, high;
5306 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5307 unsigned bits_per_word = BITS_PER_WORD;
5308
5309 /* Set sign_bit to the most significant bit of a word. */
5310 sign_bit = 1;
5311 sign_bit <<= bits_per_word - 1;
5312
5313 /* Set mask so that all bits of the word are set. We could
5314 have used 1 << BITS_PER_WORD instead of basing the
5315 calculation on sign_bit. However, on machines where
5316 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5317 compiler warning, even though the code would never be
5318 executed. */
5319 mask = sign_bit << 1;
5320 mask--;
5321
5322 /* Set sign_extend as any remaining bits. */
5323 sign_extend = ~mask;
5324
5325 /* Pick the lower word and sign-extend it. */
5326 low = INTVAL (value);
5327 low &= mask;
5328 if (low & sign_bit)
5329 low |= sign_extend;
5330
5331 /* Pick the higher word, shifted to the least significant
5332 bits, and sign-extend it. */
5333 high = INTVAL (value);
5334 high >>= bits_per_word - 1;
5335 high >>= 1;
5336 high &= mask;
5337 if (high & sign_bit)
5338 high |= sign_extend;
5339
5340 /* Store the words in the target machine order. */
5341 if (WORDS_BIG_ENDIAN)
5342 {
5343 *first = GEN_INT (high);
5344 *second = GEN_INT (low);
5345 }
5346 else
5347 {
5348 *first = GEN_INT (low);
5349 *second = GEN_INT (high);
5350 }
5351 }
5352 else
5353 {
5354 /* The rule for using CONST_INT for a wider mode
5355 is that we regard the value as signed.
5356 So sign-extend it. */
5357 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5358 if (WORDS_BIG_ENDIAN)
5359 {
5360 *first = high;
5361 *second = value;
5362 }
5363 else
5364 {
5365 *first = value;
5366 *second = high;
5367 }
5368 }
5369 }
5370 else if (!CONST_DOUBLE_P (value))
5371 {
5372 if (WORDS_BIG_ENDIAN)
5373 {
5374 *first = const0_rtx;
5375 *second = value;
5376 }
5377 else
5378 {
5379 *first = value;
5380 *second = const0_rtx;
5381 }
5382 }
5383 else if (GET_MODE (value) == VOIDmode
5384 /* This is the old way we did CONST_DOUBLE integers. */
5385 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5386 {
5387 /* In an integer, the words are defined as most and least significant.
5388 So order them by the target's convention. */
5389 if (WORDS_BIG_ENDIAN)
5390 {
5391 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5392 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5393 }
5394 else
5395 {
5396 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5397 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5398 }
5399 }
5400 else
5401 {
5402 REAL_VALUE_TYPE r;
5403 long l[2];
5404 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5405
5406 /* Note, this converts the REAL_VALUE_TYPE to the target's
5407 format, splits up the floating point double and outputs
5408 exactly 32 bits of it into each of l[0] and l[1] --
5409 not necessarily BITS_PER_WORD bits. */
5410 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5411
5412 /* If 32 bits is an entire word for the target, but not for the host,
5413 then sign-extend on the host so that the number will look the same
5414 way on the host that it would on the target. See for instance
5415 simplify_unary_operation. The #if is needed to avoid compiler
5416 warnings. */
5417
5418 #if HOST_BITS_PER_LONG > 32
5419 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5420 {
5421 if (l[0] & ((long) 1 << 31))
5422 l[0] |= ((long) (-1) << 32);
5423 if (l[1] & ((long) 1 << 31))
5424 l[1] |= ((long) (-1) << 32);
5425 }
5426 #endif
5427
5428 *first = GEN_INT (l[0]);
5429 *second = GEN_INT (l[1]);
5430 }
5431 }
5432
5433 /* Strip outer address "mutations" from LOC and return a pointer to the
5434 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5435 stripped expression there.
5436
5437 "Mutations" either convert between modes or apply some kind of
5438 alignment. */
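/* For example (illustrative), given
   (and:DI (sign_extend:DI (reg:SI R)) (const_int -16)), both the AND
   and the SIGN_EXTEND are stripped, a pointer to (reg:SI R) is
   returned, and *OUTER_CODE ends up as SIGN_EXTEND.  */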
5439
5440 rtx *
5441 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5442 {
5443 for (;;)
5444 {
5445 enum rtx_code code = GET_CODE (*loc);
5446 if (GET_RTX_CLASS (code) == RTX_UNARY)
5447 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5448 used to convert between pointer sizes. */
5449 loc = &XEXP (*loc, 0);
5450 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5451 /* (and ... (const_int -X)) is used to align to X bytes. */
5452 loc = &XEXP (*loc, 0);
5453 else if (code == SUBREG
5454 && !OBJECT_P (SUBREG_REG (*loc))
5455 && subreg_lowpart_p (*loc))
5456 /* A lowpart (subreg (operator ...) ...) is used for mode
5457 conversion too. */
5458 loc = &SUBREG_REG (*loc);
5459 else
5460 return loc;
5461 if (outer_code)
5462 *outer_code = code;
5463 }
5464 }
5465
5466 /* Return true if X must be a base rather than an index. */
5467
5468 static bool
5469 must_be_base_p (rtx x)
5470 {
5471 return GET_CODE (x) == LO_SUM;
5472 }
5473
5474 /* Return true if X must be an index rather than a base. */
5475
5476 static bool
5477 must_be_index_p (rtx x)
5478 {
5479 return GET_CODE (x) == MULT || GET_CODE (x) == ASHIFT;
5480 }
5481
5482 /* Set the segment part of address INFO to LOC, given that INNER is the
5483 unmutated value. */
5484
5485 static void
5486 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5487 {
5488 gcc_checking_assert (GET_CODE (*inner) == UNSPEC);
5489
5490 gcc_assert (!info->segment);
5491 info->segment = loc;
5492 info->segment_term = inner;
5493 }
5494
5495 /* Set the base part of address INFO to LOC, given that INNER is the
5496 unmutated value. */
5497
5498 static void
5499 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5500 {
5501 if (GET_CODE (*inner) == LO_SUM)
5502 inner = strip_address_mutations (&XEXP (*inner, 0));
5503 gcc_checking_assert (REG_P (*inner)
5504 || MEM_P (*inner)
5505 || GET_CODE (*inner) == SUBREG);
5506
5507 gcc_assert (!info->base);
5508 info->base = loc;
5509 info->base_term = inner;
5510 }
5511
5512 /* Set the index part of address INFO to LOC, given that INNER is the
5513 unmutated value. */
5514
5515 static void
5516 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5517 {
5518 if ((GET_CODE (*inner) == MULT || GET_CODE (*inner) == ASHIFT)
5519 && CONSTANT_P (XEXP (*inner, 1)))
5520 inner = strip_address_mutations (&XEXP (*inner, 0));
5521 gcc_checking_assert (REG_P (*inner)
5522 || MEM_P (*inner)
5523 || GET_CODE (*inner) == SUBREG);
5524
5525 gcc_assert (!info->index);
5526 info->index = loc;
5527 info->index_term = inner;
5528 }
5529
5530 /* Set the displacement part of address INFO to LOC, given that INNER
5531 is the constant term. */
5532
5533 static void
5534 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5535 {
5536 gcc_checking_assert (CONSTANT_P (*inner));
5537
5538 gcc_assert (!info->disp);
5539 info->disp = loc;
5540 info->disp_term = inner;
5541 }
5542
5543 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5544 rest of INFO accordingly. */
5545
5546 static void
5547 decompose_incdec_address (struct address_info *info)
5548 {
5549 info->autoinc_p = true;
5550
5551 rtx *base = &XEXP (*info->inner, 0);
5552 set_address_base (info, base, base);
5553 gcc_checking_assert (info->base == info->base_term);
5554
5555 /* These addresses are only valid when the size of the addressed
5556 value is known. */
5557 gcc_checking_assert (info->mode != VOIDmode);
5558 }
5559
5560 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5561 of INFO accordingly. */
5562
5563 static void
5564 decompose_automod_address (struct address_info *info)
5565 {
5566 info->autoinc_p = true;
5567
5568 rtx *base = &XEXP (*info->inner, 0);
5569 set_address_base (info, base, base);
5570 gcc_checking_assert (info->base == info->base_term);
5571
5572 rtx plus = XEXP (*info->inner, 1);
5573 gcc_assert (GET_CODE (plus) == PLUS);
5574
5575 info->base_term2 = &XEXP (plus, 0);
5576 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5577
5578 rtx *step = &XEXP (plus, 1);
5579 rtx *inner_step = strip_address_mutations (step);
5580 if (CONSTANT_P (*inner_step))
5581 set_address_disp (info, step, inner_step);
5582 else
5583 set_address_index (info, step, inner_step);
5584 }
5585
5586 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5587 values in [PTR, END). Return a pointer to the end of the used array. */
5588
5589 static rtx **
5590 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5591 {
5592 rtx x = *loc;
5593 if (GET_CODE (x) == PLUS)
5594 {
5595 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5596 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5597 }
5598 else
5599 {
5600 gcc_assert (ptr != end);
5601 *ptr++ = loc;
5602 }
5603 return ptr;
5604 }
5605
5606 /* Evaluate the likelihood of X being a base or index value, returning
5607 positive if it is likely to be a base, negative if it is likely to be
5608 an index, and 0 if we can't tell. Make the magnitude of the return
5609 value reflect the amount of confidence we have in the answer.
5610
5611 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
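/* For example, in (plus (reg R) (mult (reg S) (const_int 4))), the
   MULT makes S an index (-3), while REG_POINTER on R would score +2,
   so R is taken as the base and S as the index.  */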
5612
5613 static int
5614 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5615 enum rtx_code outer_code, enum rtx_code index_code)
5616 {
5617 /* See whether we can be certain. */
5618 if (must_be_base_p (x))
5619 return 3;
5620 if (must_be_index_p (x))
5621 return -3;
5622
5623 /* Believe *_POINTER unless the address shape requires otherwise. */
5624 if (REG_P (x) && REG_POINTER (x))
5625 return 2;
5626 if (MEM_P (x) && MEM_POINTER (x))
5627 return 2;
5628
5629 if (REG_P (x) && HARD_REGISTER_P (x))
5630 {
5631 /* X is a hard register. If it only fits one of the base
5632 or index classes, choose that interpretation. */
5633 int regno = REGNO (x);
5634 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5635 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5636 if (base_p != index_p)
5637 return base_p ? 1 : -1;
5638 }
5639 return 0;
5640 }
5641
5642 /* INFO->INNER describes a normal, non-automodified address.
5643 Fill in the rest of INFO accordingly. */
5644
5645 static void
5646 decompose_normal_address (struct address_info *info)
5647 {
5648 /* Treat the address as the sum of up to four values. */
5649 rtx *ops[4];
5650 size_t n_ops = extract_plus_operands (info->inner, ops,
5651 ops + ARRAY_SIZE (ops)) - ops;
5652
5653 /* If there is more than one component, any base component is in a PLUS. */
5654 if (n_ops > 1)
5655 info->base_outer_code = PLUS;
5656
5657 /* Separate the parts that contain a REG or MEM from those that don't.
5658 Record the latter in INFO and leave the former in OPS. */
5659 rtx *inner_ops[4];
5660 size_t out = 0;
5661 for (size_t in = 0; in < n_ops; ++in)
5662 {
5663 rtx *loc = ops[in];
5664 rtx *inner = strip_address_mutations (loc);
5665 if (CONSTANT_P (*inner))
5666 set_address_disp (info, loc, inner);
5667 else if (GET_CODE (*inner) == UNSPEC)
5668 set_address_segment (info, loc, inner);
5669 else
5670 {
5671 ops[out] = loc;
5672 inner_ops[out] = inner;
5673 ++out;
5674 }
5675 }
5676
5677 /* Classify the remaining OPS members as bases and indexes. */
5678 if (out == 1)
5679 {
5680 /* Assume that the remaining value is a base unless the shape
5681 requires otherwise. */
5682 if (!must_be_index_p (*inner_ops[0]))
5683 set_address_base (info, ops[0], inner_ops[0]);
5684 else
5685 set_address_index (info, ops[0], inner_ops[0]);
5686 }
5687 else if (out == 2)
5688 {
5689 /* In the event of a tie, assume the base comes first. */
5690 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5691 GET_CODE (*ops[1]))
5692 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5693 GET_CODE (*ops[0])))
5694 {
5695 set_address_base (info, ops[0], inner_ops[0]);
5696 set_address_index (info, ops[1], inner_ops[1]);
5697 }
5698 else
5699 {
5700 set_address_base (info, ops[1], inner_ops[1]);
5701 set_address_index (info, ops[0], inner_ops[0]);
5702 }
5703 }
5704 else
5705 gcc_assert (out == 0);
5706 }
5707
5708 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5709 or VOIDmode if not known. AS is the address space associated with LOC.
5710 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
5711
5712 void
5713 decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
5714 addr_space_t as, enum rtx_code outer_code)
5715 {
5716 memset (info, 0, sizeof (*info));
5717 info->mode = mode;
5718 info->as = as;
5719 info->addr_outer_code = outer_code;
5720 info->outer = loc;
5721 info->inner = strip_address_mutations (loc, &outer_code);
5722 info->base_outer_code = outer_code;
5723 switch (GET_CODE (*info->inner))
5724 {
5725 case PRE_DEC:
5726 case PRE_INC:
5727 case POST_DEC:
5728 case POST_INC:
5729 decompose_incdec_address (info);
5730 break;
5731
5732 case PRE_MODIFY:
5733 case POST_MODIFY:
5734 decompose_automod_address (info);
5735 break;
5736
5737 default:
5738 decompose_normal_address (info);
5739 break;
5740 }
5741 }
5742
5743 /* Describe address operand LOC in INFO. */
5744
5745 void
5746 decompose_lea_address (struct address_info *info, rtx *loc)
5747 {
5748 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
5749 }
5750
5751 /* Describe the address of MEM X in INFO. */
5752
5753 void
5754 decompose_mem_address (struct address_info *info, rtx x)
5755 {
5756 gcc_assert (MEM_P (x));
5757 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
5758 MEM_ADDR_SPACE (x), MEM);
5759 }
5760
5761 /* Update INFO after a change to the address it describes. */
5762
5763 void
5764 update_address (struct address_info *info)
5765 {
5766 decompose_address (info, info->outer, info->mode, info->as,
5767 info->addr_outer_code);
5768 }
5769
5770 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
5771 more complicated than that. */
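/* For example, (mult R (const_int 4)) and (ashift R (const_int 2))
   both yield a scale of 4, a bare R yields 1, and anything else
   yields 0.  */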
5772
5773 HOST_WIDE_INT
5774 get_index_scale (const struct address_info *info)
5775 {
5776 rtx index = *info->index;
5777 if (GET_CODE (index) == MULT
5778 && CONST_INT_P (XEXP (index, 1))
5779 && info->index_term == &XEXP (index, 0))
5780 return INTVAL (XEXP (index, 1));
5781
5782 if (GET_CODE (index) == ASHIFT
5783 && CONST_INT_P (XEXP (index, 1))
5784 && info->index_term == &XEXP (index, 0))
5785 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
5786
5787 if (info->index == info->index_term)
5788 return 1;
5789
5790 return 0;
5791 }
5792
5793 /* Return the "index code" of INFO, in the form required by
5794 ok_for_base_p_1. */
5795
5796 enum rtx_code
5797 get_index_code (const struct address_info *info)
5798 {
5799 if (info->index)
5800 return GET_CODE (*info->index);
5801
5802 if (info->disp)
5803 return GET_CODE (*info->disp);
5804
5805 return SCRATCH;
5806 }