/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software
   Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
#include "real.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "tree.h"

/* Information about a subreg of a hard register.  */
struct subreg_info
{
  /* Offset of first hard register involved in the subreg.  */
  int offset;
  /* Number of hard registers involved in the subreg.  */
  int nregs;
  /* Whether this subreg can be represented as a hard reg with the new
     mode.  */
  bool representable_p;
};

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);
static void subreg_get_info (unsigned int, enum machine_mode,
			     unsigned int, enum machine_mode,
			     struct subreg_info *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
						   const_rtx, enum machine_mode,
						   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
					     const_rtx, enum machine_mode,
					     unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
						enum machine_mode,
						unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
					  enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Bit flags that specify the machine subtype we are compiling for.
   Bits are tested using macros TARGET_... defined in the tm.h file
   and set by `-m...' switches.  Must be defined in rtlanal.c.  */

int target_flags;

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
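
/* For example, on a 32-bit target where QImode values must be kept
   sign-extended in SImode registers, the entry [SImode][QImode] would
   be 25: bits 31..7 of the SImode value must all be copies of the
   sign bit before we can simply switch the value to QImode.
   (Illustrative only; the table is filled in at initialization.)  */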
\f
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
      /* ??? When call-clobbered, the value is stable modulo the restore
	 that must happen after a call.  This currently screws up local-alloc
	 into believing that the restore is not needed.  */
      if (x == pic_offset_table_rtx)
	return 0;
#endif
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_unstable_p (XEXP (x, i)))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_unstable_p (XVECEXP (x, i, j)))
	    return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      if (x == pic_offset_table_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	  /* ??? When call-clobbered, the value is stable modulo the restore
	     that must happen after a call.  This currently screws up
	     local-alloc into believing that the restore is not needed, so we
	     must return 0 only if we are called from alias analysis.  */
	  && for_alias
#endif
	  )
	return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
	 (in fact it is related specifically to operand 1)
	 during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
	     || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }

  return 0;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */
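/* For example, (plus (reg FP) (const_int 4)) cannot trap, whereas a
   weak SYMBOL_REF can, since a weak symbol may resolve to address
   zero.  (Illustrative only.)  */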

static int
rtx_addr_can_trap_p_1 (const_rtx x, enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
	 - it is an address that can't trap plus a constant integer,
	   with the proper remainder modulo the mode size if we are
	   considering unaligned memory references.  */
      if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  HOST_WIDE_INT offset;

	  if (!STRICT_ALIGNMENT
	      || !unaligned_mems
	      || GET_MODE_SIZE (mode) == 0)
	    return 0;

	  offset = INTVAL (XEXP (x, 1));

#ifdef SPARC_STACK_BOUNDARY_HACK
	  /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
	     the real alignment of %sp.  However, when it does this, the
	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
	  if (SPARC_STACK_BOUNDARY_HACK
	      && (XEXP (x, 0) == stack_pointer_rtx
		  || XEXP (x, 0) == hard_frame_pointer_rtx))
	    offset -= STACK_POINTER_OFFSET;
#endif

	  return offset % GET_MODE_SIZE (mode) != 0;
	}

      /* - or it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
	return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
	       && CONSTANT_P (XEXP (x, 1)))
	return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
	 auto-inc is only allowed in memories, the register must be a
	 pointer.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) > 0)
	return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }
  return 0;
}
\f
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */
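/* For example, for (const (plus (symbol_ref "x") (const_int 4))) we
   return 4, and for (const (minus (symbol_ref "x") (const_int 4))) we
   return -4.  (Illustrative only.)  */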

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return XEXP (x, 0);
  return 0;
}
\f
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
	return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
	return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */
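/* For example, (const (plus (symbol_ref "x") (const_int 8))) splits
   into the SYMBOL_REF and (const_int 8); any X without such a shape
   splits into X itself and const0_rtx.  (Illustrative only.)  */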

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  *base_out = XEXP (x, 0);
	  *offset_out = XEXP (x, 1);
	  return;
	}
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
\f
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
	count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
	return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
	return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
	{
	case 'e':
	  count += count_occurrences (XEXP (x, i), find, count_dest);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
	  break;
	}
    }
  return count;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */
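/* For example, reg_mentioned_p ((reg:SI 5), in) is nonzero if (reg:SI 5)
   occurs anywhere inside IN; passing a SYMBOL_REF as REG likewise finds
   a structurally equal subexpression.  (Illustrative only.)  */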

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
	 and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    case CONST_INT:
    case CONST_VECTOR:
    case CONST_DOUBLE:
    case CONST_FIXED:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && reg_mentioned_p (reg, XEXP (in, i)))
	return 1;
    }
  return 0;
}
\f
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
	    || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */
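/* For example, in (set (reg:SI 1) (reg:SI 2)) register 1 is not
   referenced, but in (set (strict_low_part (reg:SI 1)) (reg:SI 2)) it
   is, since part of its old value survives.  (Illustrative only.)  */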

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
	return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn references X if
	 it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
	  && GET_CODE (SET_DEST (body)) != PC
	  && !REG_P (SET_DEST (body))
	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (body)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
	return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
	  return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
	  return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
	return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Nonzero if REG is set or clobbered within INSN; this is the
   internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
	  || (CALL_P (insn)
	      && ((REG_P (reg)
		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
					       GET_MODE (reg), REGNO (reg)))
		  || MEM_P (reg)
		  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
	if (memory_modified_in_insn_p (x, insn))
	  return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_between_p (XVECEXP (x, i, j), start, end))
	    return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      if (memory_modified_in_insn_p (x, insn))
	return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_in_p (XVECEXP (x, i, j), insn))
	    return 1;
    }

  return 0;
}
\f
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}
/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */
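/* For example, a PARALLEL consisting of one SET plus a CLOBBER of a
   flags register still counts as a single set, as does a PARALLEL
   whose extra SETs all carry REG_UNUSED notes and have no side
   effects.  (Illustrative only.)  */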

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx sub = XVECEXP (pat, 0, i);
	  switch (GET_CODE (sub))
	    {
	    case USE:
	    case CLOBBER:
	      break;

	    case SET:
	      /* We can consider insns having multiple sets, where all
		 but one are dead, as single set insns.  In the common
		 case only a single set is present in the pattern, so we
		 want to avoid checking for REG_UNUSED notes unless
		 necessary.

		 When we reach a set for the first time, we just expect
		 it to be the single set we are looking for; only when
		 more sets are found in the insn do we check them.  */
	      if (!set_verified)
		{
		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
		      && !side_effects_p (set))
		    set = NULL;
		  else
		    set_verified = 1;
		}
	      if (!set)
		set = sub, set_verified = 0;
	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
		       || side_effects_p (sub))
		return NULL_RTX;
	      break;

	    default:
	      return NULL_RTX;
	    }
	}
    }
  return set;
}

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  {
	    /* If we have already found a SET, then return now.  */
	    if (found)
	      return 1;
	    else
	      found = 1;
	  }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */
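/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op, as is a copy
   between two identical MEMs with no side effects.  (Illustrative
   only.)  */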

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
	   && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
	   && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
	return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
	  && REGNO (src) == REGNO (dst));
}
\f
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
	 this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx tem = XVECEXP (pat, 0, i);

	  if (GET_CODE (tem) == USE
	      || GET_CODE (tem) == CLOBBER)
	    continue;

	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
	    return 0;
	}

      return 1;
    }
  return 0;
}
\f

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
	rtx set = single_set (p);
	rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

	if (set && rtx_equal_p (x, SET_DEST (set)))
	  {
	    rtx src = SET_SRC (set);

	    if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
	      src = XEXP (note, 0);

	    if ((valid_to == NULL_RTX
		 || ! modified_between_p (src, PREV_INSN (p), valid_to))
		/* Reject hard registers because we don't usually want
		   to use them; we'd rather use a pseudo.  */
		&& (! (REG_P (src)
		       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
	      {
		*pinsn = p;
		return src;
	      }
	  }

	/* If set in non-simple way, we don't have a value.  */
	if (reg_set_p (x, p))
	  break;
      }

  return x;
}
\f
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
		   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
	 clobber a virtual register.  In fact, we could be more precise,
	 but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	   || x_regno == ARG_POINTER_REGNUM
#endif
	   || x_regno == FRAME_POINTER_REGNUM)
	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
	return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
	 registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int inner_regno = subreg_regno (x);
	  unsigned int inner_endregno
	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
			     ? subreg_nregs (x) : 1);

	  return endregno > inner_regno && regno < inner_endregno;
	}
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
	  /* Note setting a SUBREG counts as referring to the REG it is in for
	     a pseudo but not for hard registers since we can
	     treat each word individually.  */
	  && ((GET_CODE (SET_DEST (x)) == SUBREG
	       && loc != &SUBREG_REG (SET_DEST (x))
	       && REG_P (SUBREG_REG (SET_DEST (x)))
	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
	       && refers_to_regno_p (regno, endregno,
				     SUBREG_REG (SET_DEST (x)), loc))
	      || (!REG_P (SET_DEST (x))
		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
	return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
	return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
	{
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }
	  else
	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
	      return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (loc != &XVECEXP (x, i, j)
		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
	      return 1;
	}
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */
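/* For example, on a target where (reg:DI 0) occupies hard registers 0
   and 1, modifying it affects a use of (reg:SI 1).  (Illustrative
   only.)  */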

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we test IN; the corresponding check on X is
     folded into the switch statement below, whose default case
     asserts CONSTANT_P (x).  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
	regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
			  ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
	const char *fmt;
	int i;

	if (MEM_P (in))
	  return 1;

	fmt = GET_RTX_FORMAT (GET_CODE (in));
	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    {
	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
		return 1;
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
		  return 1;
	    }

	return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
	int i;

	/* If any register in here refers to it we return true.  */
	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
	    return 1;
	return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */
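/* A typical use, sketched for illustration only (the names record_set
   and my_data are made up):

     static void
     record_set (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
     {
       ((struct my_data *) data)->last_dest = dest;
     }

     note_stores (PATTERN (insn), record_set, &my_data);

   record_set is then called once for each REG, MEM, CC0 or PC that the
   pattern stores into or clobbers.  */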

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
	      && (!REG_P (SUBREG_REG (dest))
		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
	 each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
	{
	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
	}
      else
	(*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
\f
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	(*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	(*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
	rtx dest = SET_DEST (body);

	/* For SETs we visit everything in the source, plus registers in
	   any memory expression in the destination, plus the operands of
	   a ZERO_EXTRACT destination.  */
	(*fun) (&SET_SRC (body), data);

	if (GET_CODE (dest) == ZERO_EXTRACT)
	  {
	    (*fun) (&XEXP (dest, 1), data);
	    (*fun) (&XEXP (dest, 2), data);
	  }

	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
	  dest = XEXP (dest, 0);

	if (MEM_P (dest))
	  (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
\f
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
	    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
	       + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
	 values of functions, and those registers are wrapped in
	 PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	{
	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
	  if (inner != NULL_RTX
	      && covers_regno_no_parallel_p (inner, test_regno))
	    return true;
	}

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == COND_EXEC)
    pattern = COND_EXEC_CODE (pattern);

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
	{
	  rtx body = XVECEXP (pattern, 0, i);

	  if (GET_CODE (body) == COND_EXEC)
	    body = COND_EXEC_CODE (body);

	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
	      && covers_regno_p (SET_DEST (body), test_regno))
	    return 1;
	}
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == kind)
	  return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
	/* Verify that it is a register, so that scratch and MEM won't cause a
	   problem here.  */
	&& REG_P (XEXP (link, 0))
	&& REGNO (XEXP (link, 0)) <= regno
	&& END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
	|| REG_NOTE_KIND (link) == REG_EQUIV)
      {
	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
	   insns that have multiple sets.  Checking single_set to
	   make sure of this is not the proper check, as explained
	   in the comment in set_unique_reg_note.

	   This should be changed into an assert.  */
	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
	  return 0;
	return link;
      }
  return NULL;
}

/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
	return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	if (GET_CODE (XEXP (link, 0)) == code
	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
	  return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
	 to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int end_regno = END_HARD_REGNO (datum);
	  unsigned int i;

	  for (i = regno; i < end_regno; i++)
	    if (find_regno_fusage (insn, code, i))
	      return 1;
	}
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn) )
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
	  && REG_P (reg = XEXP (op, 0))
	  && REGNO (reg) <= regno
	  && END_HARD_REGNO (reg) > regno)
	return 1;
    }

  return 0;
}

\f
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
	{
	  XEXP (link, 1) = XEXP (note, 1);
	  break;
	}

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
	*loc = XEXP (*loc, 1);
      else
	loc = &XEXP (*loc, 1);
    }
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = XEXP (temp, 1);
	  else
	    *listp = XEXP (temp, 1);

	  return;
	}

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
\f
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no other
   instructions should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */
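/* Note how the next three predicates nest: volatile_insn_p is the
   strictest, volatile_refs_p additionally counts volatile MEMs, and
   side_effects_p additionally counts auto-increments, CALLs and
   CLOBBERs with a non-VOID mode.  */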

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_insn_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_insn_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}

/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}

/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
\f
enum may_trap_p_flags
{
  MTP_UNALIGNED_MEMS = 1,
  MTP_AFTER_MOVE = 2
};
2150 /* Return nonzero if evaluating rtx X might cause a trap.
2151 (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
2152 unaligned memory accesses on strict alignment machines. If
2153 (FLAGS & MTP_AFTER_MOVE) is set, return nonzero even if the expression
2154 cannot trap at its current location but might become trapping if moved
2155 elsewhere. */
2156
2157 int
2158 may_trap_p_1 (const_rtx x, unsigned flags)
2159 {
2160 int i;
2161 enum rtx_code code;
2162 const char *fmt;
2163 bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
2164
2165 if (x == 0)
2166 return 0;
2167 code = GET_CODE (x);
2168 switch (code)
2169 {
2170 /* Handle these cases quickly. */
2171 case CONST_INT:
2172 case CONST_DOUBLE:
2173 case CONST_FIXED:
2174 case CONST_VECTOR:
2175 case SYMBOL_REF:
2176 case LABEL_REF:
2177 case CONST:
2178 case PC:
2179 case CC0:
2180 case REG:
2181 case SCRATCH:
2182 return 0;
2183
2184 case UNSPEC:
2185 case UNSPEC_VOLATILE:
2186 return targetm.unspec_may_trap_p (x, flags);
2187
2188 case ASM_INPUT:
2189 case TRAP_IF:
2190 return 1;
2191
2192 case ASM_OPERANDS:
2193 return MEM_VOLATILE_P (x);
2194
2195 /* Memory ref can trap unless it's a static var or a stack slot. */
2196 case MEM:
2197 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2198 reference; moving it out of a condition might cause its address
2199 to become invalid. */
2200 !(flags & MTP_AFTER_MOVE)
2201 && MEM_NOTRAP_P (x)
2202 && (!STRICT_ALIGNMENT || !unaligned_mems))
2203 return 0;
2204 return
2205 rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
2206
2207 /* Division by a non-constant might trap. */
2208 case DIV:
2209 case MOD:
2210 case UDIV:
2211 case UMOD:
2212 if (HONOR_SNANS (GET_MODE (x)))
2213 return 1;
2214 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2215 return flag_trapping_math;
2216 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2217 return 1;
2218 break;
2219
2220 case EXPR_LIST:
2221 /* An EXPR_LIST is used to represent a function call. This
2222 certainly may trap. */
2223 return 1;
2224
2225 case GE:
2226 case GT:
2227 case LE:
2228 case LT:
2229 case LTGT:
2230 case COMPARE:
2231 /* Some floating point comparisons may trap. */
2232 if (!flag_trapping_math)
2233 break;
2234 /* ??? There is no machine independent way to check for tests that trap
2235 when COMPARE is used, though many targets do make this distinction.
2236 For instance, sparc uses CCFPE for compares which generate exceptions
2237 and CCFP for compares which do not generate exceptions. */
2238 if (HONOR_NANS (GET_MODE (x)))
2239 return 1;
2240 /* But often the compare has some CC mode, so check operand
2241 modes as well. */
2242 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2243 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2244 return 1;
2245 break;
2246
2247 case EQ:
2248 case NE:
2249 if (HONOR_SNANS (GET_MODE (x)))
2250 return 1;
2251 /* Often comparison is CC mode, so check operand modes. */
2252 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2253 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2254 return 1;
2255 break;
2256
2257 case FIX:
2258 /* Conversion of floating point might trap. */
2259 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2260 return 1;
2261 break;
2262
2263 case NEG:
2264 case ABS:
2265 case SUBREG:
2266 /* These operations don't trap even with floating point. */
2267 break;
2268
2269 default:
2270 /* Any floating arithmetic may trap. */
2271 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2272 && flag_trapping_math)
2273 return 1;
2274 }
2275
2276 fmt = GET_RTX_FORMAT (code);
2277 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2278 {
2279 if (fmt[i] == 'e')
2280 {
2281 if (may_trap_p_1 (XEXP (x, i), flags))
2282 return 1;
2283 }
2284 else if (fmt[i] == 'E')
2285 {
2286 int j;
2287 for (j = 0; j < XVECLEN (x, i); j++)
2288 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2289 return 1;
2290 }
2291 }
2292 return 0;
2293 }
2294
2295 /* Return nonzero if evaluating rtx X might cause a trap. */
2296
2297 int
2298 may_trap_p (const_rtx x)
2299 {
2300 return may_trap_p_1 (x, 0);
2301 }
2302
2303 /* Return nonzero if evaluating rtx X might cause a trap, when the expression
2304 is moved from its current location by some optimization. */
2305
2306 int
2307 may_trap_after_code_motion_p (const_rtx x)
2308 {
2309 return may_trap_p_1 (x, MTP_AFTER_MOVE);
2310 }
2311
2312 /* Same as above, but additionally return nonzero if evaluating rtx X might
2313 cause a fault. We define a fault for the purpose of this function as an
2314 erroneous execution condition that cannot be encountered during the normal
2315 execution of a valid program; the typical example is an unaligned memory
2316 access on a strict alignment machine. The compiler guarantees that it
2317 doesn't generate code that will fault from a valid program, but this
2318 guarantee doesn't mean anything for individual instructions. Consider
2319 the following example:
2320
2321 struct S { int d; union { char *cp; int *ip; }; };
2322
2323 int foo(struct S *s)
2324 {
2325 if (s->d == 1)
2326 return *s->ip;
2327 else
2328 return *s->cp;
2329 }
2330
2331 on a strict alignment machine. In a valid program, foo will never be
2332 invoked on a structure for which d is equal to 1 and the underlying
2333 unique field of the union not aligned on a 4-byte boundary, but the
2334 expression *s->ip might cause a fault if considered individually.
2335
2336 At the RTL level, potentially problematic expressions will almost always
2337 satisfy may_trap_p; for example, the above dereference can be emitted as
2338 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2339 However, suppose that foo is inlined in a caller that causes s->cp to
2340 point to a local character variable and guarantees that s->d is not set
2341 to 1; foo may have been effectively translated into pseudo-RTL as:
2342
2343 if ((reg:SI) == 1)
2344 (set (reg:SI) (mem:SI (%fp - 7)))
2345 else
2346 (set (reg:QI) (mem:QI (%fp - 7)))
2347
2348 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2349 memory reference to a stack slot, but it will certainly cause a fault
2350 on a strict alignment machine. */
2351
2352 int
2353 may_trap_or_fault_p (const_rtx x)
2354 {
2355 return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
2356 }
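
/* A minimal sketch (hypothetical helper, not part of GCC proper) of the
   intended division of labor between the three entry points above: code
   that evaluates an expression in place may ask may_trap_p, code that
   hoists an insn to another location must ask
   may_trap_after_code_motion_p, and code that could also introduce
   unaligned accesses must ask may_trap_or_fault_p.  */
#if 0
static bool
safe_to_hoist_p (rtx insn)
{
  /* Moving the insn invalidates the MEM_NOTRAP_P reasoning, so the
     plain may_trap_p test would be too weak here.  */
  return !may_trap_after_code_motion_p (PATTERN (insn));
}
#endif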
2357 \f
2358 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2359 i.e., an inequality. */
2360
2361 int
2362 inequality_comparisons_p (const_rtx x)
2363 {
2364 const char *fmt;
2365 int len, i;
2366 const enum rtx_code code = GET_CODE (x);
2367
2368 switch (code)
2369 {
2370 case REG:
2371 case SCRATCH:
2372 case PC:
2373 case CC0:
2374 case CONST_INT:
2375 case CONST_DOUBLE:
2376 case CONST_FIXED:
2377 case CONST_VECTOR:
2378 case CONST:
2379 case LABEL_REF:
2380 case SYMBOL_REF:
2381 return 0;
2382
2383 case LT:
2384 case LTU:
2385 case GT:
2386 case GTU:
2387 case LE:
2388 case LEU:
2389 case GE:
2390 case GEU:
2391 return 1;
2392
2393 default:
2394 break;
2395 }
2396
2397 len = GET_RTX_LENGTH (code);
2398 fmt = GET_RTX_FORMAT (code);
2399
2400 for (i = 0; i < len; i++)
2401 {
2402 if (fmt[i] == 'e')
2403 {
2404 if (inequality_comparisons_p (XEXP (x, i)))
2405 return 1;
2406 }
2407 else if (fmt[i] == 'E')
2408 {
2409 int j;
2410 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2411 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2412 return 1;
2413 }
2414 }
2415
2416 return 0;
2417 }
2418 \f
2419 /* Replace any occurrence of FROM in X with TO. The function does
2420 not descend into CONST_DOUBLE expressions when replacing.
2421
2422 Note that no copying is done, so X must not be shared unless all copies
2423 are to be modified. */
2424
2425 rtx
2426 replace_rtx (rtx x, rtx from, rtx to)
2427 {
2428 int i, j;
2429 const char *fmt;
2430
2431 /* The following prevents infinite loops when a MEM inside a
2432 CONST_DOUBLE would be replaced by that same CONST_DOUBLE. */
2433 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
2434 return x;
2435
2436 if (x == from)
2437 return to;
2438
2439 /* Allow this function to make replacements in EXPR_LISTs. */
2440 if (x == 0)
2441 return 0;
2442
2443 if (GET_CODE (x) == SUBREG)
2444 {
2445 rtx new = replace_rtx (SUBREG_REG (x), from, to);
2446
2447 if (GET_CODE (new) == CONST_INT)
2448 {
2449 x = simplify_subreg (GET_MODE (x), new,
2450 GET_MODE (SUBREG_REG (x)),
2451 SUBREG_BYTE (x));
2452 gcc_assert (x);
2453 }
2454 else
2455 SUBREG_REG (x) = new;
2456
2457 return x;
2458 }
2459 else if (GET_CODE (x) == ZERO_EXTEND)
2460 {
2461 rtx new = replace_rtx (XEXP (x, 0), from, to);
2462
2463 if (GET_CODE (new) == CONST_INT)
2464 {
2465 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2466 new, GET_MODE (XEXP (x, 0)));
2467 gcc_assert (x);
2468 }
2469 else
2470 XEXP (x, 0) = new;
2471
2472 return x;
2473 }
2474
2475 fmt = GET_RTX_FORMAT (GET_CODE (x));
2476 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2477 {
2478 if (fmt[i] == 'e')
2479 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2480 else if (fmt[i] == 'E')
2481 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2482 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2483 }
2484
2485 return x;
2486 }
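
/* A minimal sketch (hypothetical helper, not part of GCC proper) of
   using replace_rtx.  Since no copying is done, the pattern is unshared
   with copy_rtx first so that other users of PAT are not modified.  */
#if 0
static rtx
substitute_reg (rtx pat, rtx pseudo, rtx hard_reg)
{
  /* copy_rtx returns an unshared tree, so the in-place replacement
     below cannot clobber other references to PAT.  */
  return replace_rtx (copy_rtx (pat), pseudo, hard_reg);
}
#endif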
2487 \f
2488 /* Replace occurrences of the old label in *X with the new one.
2489 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2490
2491 int
2492 replace_label (rtx *x, void *data)
2493 {
2494 rtx l = *x;
2495 rtx old_label = ((replace_label_data *) data)->r1;
2496 rtx new_label = ((replace_label_data *) data)->r2;
2497 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2498
2499 if (l == NULL_RTX)
2500 return 0;
2501
2502 if (GET_CODE (l) == SYMBOL_REF
2503 && CONSTANT_POOL_ADDRESS_P (l))
2504 {
2505 rtx c = get_pool_constant (l);
2506 if (rtx_referenced_p (old_label, c))
2507 {
2508 rtx new_c, new_l;
2509 replace_label_data *d = (replace_label_data *) data;
2510
2511 /* Create a copy of constant C; replace the label inside
2512 but do not update LABEL_NUSES because uses in constant pool
2513 are not counted. */
2514 new_c = copy_rtx (c);
2515 d->update_label_nuses = false;
2516 for_each_rtx (&new_c, replace_label, data);
2517 d->update_label_nuses = update_label_nuses;
2518
2519 /* Add the new constant NEW_C to constant pool and replace
2520 the old reference to constant by new reference. */
2521 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2522 *x = replace_rtx (l, l, new_l);
2523 }
2524 return 0;
2525 }
2526
2527 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2528 field. This is not handled by for_each_rtx because it doesn't
2529 handle unprinted ('0') fields. */
2530 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2531 JUMP_LABEL (l) = new_label;
2532
2533 if ((GET_CODE (l) == LABEL_REF
2534 || GET_CODE (l) == INSN_LIST)
2535 && XEXP (l, 0) == old_label)
2536 {
2537 XEXP (l, 0) = new_label;
2538 if (update_label_nuses)
2539 {
2540 ++LABEL_NUSES (new_label);
2541 --LABEL_NUSES (old_label);
2542 }
2543 return 0;
2544 }
2545
2546 return 0;
2547 }
2548
2549 /* If *BODY is equal to X or X is directly referenced by *BODY,
2550 return nonzero, so that FOR_EACH_RTX stops traversing and returns
2551 nonzero too; otherwise FOR_EACH_RTX continues traversing *BODY. */
2552
2553 static int
2554 rtx_referenced_p_1 (rtx *body, void *x)
2555 {
2556 rtx y = (rtx) x;
2557
2558 if (*body == NULL_RTX)
2559 return y == NULL_RTX;
2560
2561 /* Return true if a label_ref *BODY refers to label Y. */
2562 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2563 return XEXP (*body, 0) == y;
2564
2565 /* If *BODY is a reference to a pool constant, traverse the constant. */
2566 if (GET_CODE (*body) == SYMBOL_REF
2567 && CONSTANT_POOL_ADDRESS_P (*body))
2568 return rtx_referenced_p (y, get_pool_constant (*body));
2569
2570 /* By default, compare the RTL expressions. */
2571 return rtx_equal_p (*body, y);
2572 }
2573
2574 /* Return true if X is referenced in BODY. */
2575
2576 int
2577 rtx_referenced_p (rtx x, rtx body)
2578 {
2579 return for_each_rtx (&body, rtx_referenced_p_1, x);
2580 }
2581
2582 /* If INSN is a tablejump, return true and store the label preceding the
2583 jump table in *LABELP and the jump table in *TABLEP. LABELP and TABLEP may be NULL. */
2584
2585 bool
2586 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2587 {
2588 rtx label, table;
2589
2590 if (JUMP_P (insn)
2591 && (label = JUMP_LABEL (insn)) != NULL_RTX
2592 && (table = next_active_insn (label)) != NULL_RTX
2593 && JUMP_P (table)
2594 && (GET_CODE (PATTERN (table)) == ADDR_VEC
2595 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
2596 {
2597 if (labelp)
2598 *labelp = label;
2599 if (tablep)
2600 *tablep = table;
2601 return true;
2602 }
2603 return false;
2604 }
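
/* A minimal sketch (hypothetical helper, not part of GCC proper) of
   consuming tablejump_p: count the entries of the dispatch table of a
   tablejump insn.  */
#if 0
static int
count_dispatch_targets (const_rtx insn)
{
  rtx table;

  if (!tablejump_p (insn, NULL, &table))
    return 0;

  /* The label vector is operand 1 of an ADDR_DIFF_VEC and operand 0 of
     an ADDR_VEC, as in label_is_jump_target_p below.  */
  return XVECLEN (PATTERN (table),
                  GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC);
}
#endif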
2605
2606 /* A subroutine of computed_jump_p. Return 1 if X contains a REG or MEM or
2607 constant that is not in the constant pool and is not in the condition
2608 of an IF_THEN_ELSE. */
2609
2610 static int
2611 computed_jump_p_1 (const_rtx x)
2612 {
2613 const enum rtx_code code = GET_CODE (x);
2614 int i, j;
2615 const char *fmt;
2616
2617 switch (code)
2618 {
2619 case LABEL_REF:
2620 case PC:
2621 return 0;
2622
2623 case CONST:
2624 case CONST_INT:
2625 case CONST_DOUBLE:
2626 case CONST_FIXED:
2627 case CONST_VECTOR:
2628 case SYMBOL_REF:
2629 case REG:
2630 return 1;
2631
2632 case MEM:
2633 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2634 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2635
2636 case IF_THEN_ELSE:
2637 return (computed_jump_p_1 (XEXP (x, 1))
2638 || computed_jump_p_1 (XEXP (x, 2)));
2639
2640 default:
2641 break;
2642 }
2643
2644 fmt = GET_RTX_FORMAT (code);
2645 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2646 {
2647 if (fmt[i] == 'e'
2648 && computed_jump_p_1 (XEXP (x, i)))
2649 return 1;
2650
2651 else if (fmt[i] == 'E')
2652 for (j = 0; j < XVECLEN (x, i); j++)
2653 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2654 return 1;
2655 }
2656
2657 return 0;
2658 }
2659
2660 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2661
2662 Tablejumps and casesi insns are not considered indirect jumps;
2663 we can recognize them by a (use (label_ref)). */
2664
2665 int
2666 computed_jump_p (const_rtx insn)
2667 {
2668 int i;
2669 if (JUMP_P (insn))
2670 {
2671 rtx pat = PATTERN (insn);
2672
2673 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2674 if (JUMP_LABEL (insn) != NULL)
2675 return 0;
2676
2677 if (GET_CODE (pat) == PARALLEL)
2678 {
2679 int len = XVECLEN (pat, 0);
2680 int has_use_labelref = 0;
2681
2682 for (i = len - 1; i >= 0; i--)
2683 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2684 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2685 == LABEL_REF))
2686 has_use_labelref = 1;
2687
2688 if (! has_use_labelref)
2689 for (i = len - 1; i >= 0; i--)
2690 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2691 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2692 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2693 return 1;
2694 }
2695 else if (GET_CODE (pat) == SET
2696 && SET_DEST (pat) == pc_rtx
2697 && computed_jump_p_1 (SET_SRC (pat)))
2698 return 1;
2699 }
2700 return 0;
2701 }
2702
2703 /* Optimized inner loop of for_each_rtx, which tries to avoid useless
2704 recursive calls. Processes the sub-expressions of EXP and passes them to F. */
2705 static int
2706 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2707 {
2708 int result, i, j;
2709 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2710 rtx *x;
2711
2712 for (; format[n] != '\0'; n++)
2713 {
2714 switch (format[n])
2715 {
2716 case 'e':
2717 /* Call F on X. */
2718 x = &XEXP (exp, n);
2719 result = (*f) (x, data);
2720 if (result == -1)
2721 /* Do not traverse sub-expressions. */
2722 continue;
2723 else if (result != 0)
2724 /* Stop the traversal. */
2725 return result;
2726
2727 if (*x == NULL_RTX)
2728 /* There are no sub-expressions. */
2729 continue;
2730
2731 i = non_rtx_starting_operands[GET_CODE (*x)];
2732 if (i >= 0)
2733 {
2734 result = for_each_rtx_1 (*x, i, f, data);
2735 if (result != 0)
2736 return result;
2737 }
2738 break;
2739
2740 case 'V':
2741 case 'E':
2742 if (XVEC (exp, n) == 0)
2743 continue;
2744 for (j = 0; j < XVECLEN (exp, n); ++j)
2745 {
2746 /* Call F on X. */
2747 x = &XVECEXP (exp, n, j);
2748 result = (*f) (x, data);
2749 if (result == -1)
2750 /* Do not traverse sub-expressions. */
2751 continue;
2752 else if (result != 0)
2753 /* Stop the traversal. */
2754 return result;
2755
2756 if (*x == NULL_RTX)
2757 /* There are no sub-expressions. */
2758 continue;
2759
2760 i = non_rtx_starting_operands[GET_CODE (*x)];
2761 if (i >= 0)
2762 {
2763 result = for_each_rtx_1 (*x, i, f, data);
2764 if (result != 0)
2765 return result;
2766 }
2767 }
2768 break;
2769
2770 default:
2771 /* Nothing to do. */
2772 break;
2773 }
2774 }
2775
2776 return 0;
2777 }
2778
2779 /* Traverse X via depth-first search, calling F for each
2780 sub-expression (including X itself). F is also passed the DATA.
2781 If F returns -1, do not traverse sub-expressions, but continue
2782 traversing the rest of the tree. If F ever returns any other
2783 nonzero value, stop the traversal, and return the value returned
2784 by F. Otherwise, return 0. This function does not traverse inside
2785 tree structure that contains RTX_EXPRs, or into sub-expressions
2786 whose format code is `0' since it is not known whether or not those
2787 codes are actually RTL.
2788
2789 This routine is very general, and could (should?) be used to
2790 implement many of the other routines in this file. */
2791
2792 int
2793 for_each_rtx (rtx *x, rtx_function f, void *data)
2794 {
2795 int result;
2796 int i;
2797
2798 /* Call F on X. */
2799 result = (*f) (x, data);
2800 if (result == -1)
2801 /* Do not traverse sub-expressions. */
2802 return 0;
2803 else if (result != 0)
2804 /* Stop the traversal. */
2805 return result;
2806
2807 if (*x == NULL_RTX)
2808 /* There are no sub-expressions. */
2809 return 0;
2810
2811 i = non_rtx_starting_operands[GET_CODE (*x)];
2812 if (i < 0)
2813 return 0;
2814
2815 return for_each_rtx_1 (*x, i, f, data);
2816 }
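
/* A minimal sketch (hypothetical callback, not part of GCC proper) of
   the for_each_rtx protocol documented above: the callback returns 0 to
   keep walking, -1 to skip the sub-expressions of the current rtx, and
   any other value to abort the walk and propagate that value.  */
#if 0
static int
mem_found_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  /* Abort the traversal at the first MEM found.  */
  return MEM_P (*x) ? 1 : 0;
}

static bool
contains_mem_p (rtx x)
{
  return for_each_rtx (&x, mem_found_1, NULL) != 0;
}
#endif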
2817
2818
2819 /* Searches X for any reference to REGNO, returning the rtx of the
2820 reference found if any. Otherwise, returns NULL_RTX. */
2821
2822 rtx
2823 regno_use_in (unsigned int regno, rtx x)
2824 {
2825 const char *fmt;
2826 int i, j;
2827 rtx tem;
2828
2829 if (REG_P (x) && REGNO (x) == regno)
2830 return x;
2831
2832 fmt = GET_RTX_FORMAT (GET_CODE (x));
2833 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2834 {
2835 if (fmt[i] == 'e')
2836 {
2837 if ((tem = regno_use_in (regno, XEXP (x, i))))
2838 return tem;
2839 }
2840 else if (fmt[i] == 'E')
2841 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2842 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
2843 return tem;
2844 }
2845
2846 return NULL_RTX;
2847 }
2848
2849 /* Return a value indicating whether OP, an operand of a commutative
2850 operation, is preferred as the first or second operand. The higher
2851 the value, the stronger the preference for being the first operand;
2852 thus negative values indicate a preference for being the second
2853 operand. */
2854
2855 int
2856 commutative_operand_precedence (rtx op)
2857 {
2858 enum rtx_code code = GET_CODE (op);
2859
2860 /* Constants always become the second operand. Prefer "nice" constants. */
2861 if (code == CONST_INT)
2862 return -8;
2863 if (code == CONST_DOUBLE)
2864 return -7;
2865 if (code == CONST_FIXED)
2866 return -7;
2867 op = avoid_constant_pool_reference (op);
2868 code = GET_CODE (op);
2869
2870 switch (GET_RTX_CLASS (code))
2871 {
2872 case RTX_CONST_OBJ:
2873 if (code == CONST_INT)
2874 return -6;
2875 if (code == CONST_DOUBLE)
2876 return -5;
2877 if (code == CONST_FIXED)
2878 return -5;
2879 return -4;
2880
2881 case RTX_EXTRA:
2882 /* SUBREGs of objects should come second. */
2883 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
2884 return -3;
2885 return 0;
2886
2887 case RTX_OBJ:
2888 /* Complex expressions should come first, so decrease the priority
2889 of objects. Prefer pointer objects over non-pointer objects. */
2890 if ((REG_P (op) && REG_POINTER (op))
2891 || (MEM_P (op) && MEM_POINTER (op)))
2892 return -1;
2893 return -2;
2894
2895 case RTX_COMM_ARITH:
2896 /* Prefer operands that are themselves commutative to be first.
2897 This helps to make things linear. In particular,
2898 (and (and (reg) (reg)) (not (reg))) is canonical. */
2899 return 4;
2900
2901 case RTX_BIN_ARITH:
2902 /* If only one operand is a binary expression, it will be the first
2903 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
2904 is canonical, although it will usually be further simplified. */
2905 return 2;
2906
2907 case RTX_UNARY:
2908 /* Then prefer NEG and NOT. */
2909 if (code == NEG || code == NOT)
2910 return 1;
2911
2912 default:
2913 return 0;
2914 }
2915 }
2916
2917 /* Return 1 iff it is necessary to swap the operands of a commutative
2918 operation in order to canonicalize the expression. */
2919
2920 bool
2921 swap_commutative_operands_p (rtx x, rtx y)
2922 {
2923 return (commutative_operand_precedence (x)
2924 < commutative_operand_precedence (y));
2925 }
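
/* A minimal sketch (not part of GCC proper) of the canonicalization
   idiom built on the predicate above: put the operand with the higher
   precedence first before generating a commutative operation, which is
   essentially what simplify_gen_binary does.  */
#if 0
static rtx
gen_canonical_plus (enum machine_mode mode, rtx op0, rtx op1)
{
  rtx tem;

  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* A constant now ends up second, e.g. (plus (reg) (const_int 4))
     rather than the reverse.  */
  return gen_rtx_PLUS (mode, op0, op1);
}
#endif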
2926
2927 /* Return 1 if X is an autoincrement side effect and the register is
2928 not the stack pointer. */
2929 int
2930 auto_inc_p (const_rtx x)
2931 {
2932 switch (GET_CODE (x))
2933 {
2934 case PRE_INC:
2935 case POST_INC:
2936 case PRE_DEC:
2937 case POST_DEC:
2938 case PRE_MODIFY:
2939 case POST_MODIFY:
2940 /* There are no REG_INC notes for SP. */
2941 if (XEXP (x, 0) != stack_pointer_rtx)
2942 return 1;
2943 default:
2944 break;
2945 }
2946 return 0;
2947 }
2948
2949 /* Return nonzero if IN contains a piece of rtl whose address is LOC. */
2950 int
2951 loc_mentioned_in_p (rtx *loc, const_rtx in)
2952 {
2953 enum rtx_code code;
2954 const char *fmt;
2955 int i, j;
2956
2957 if (!in)
2958 return 0;
2959
2960 code = GET_CODE (in);
2961 fmt = GET_RTX_FORMAT (code);
2962 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2963 {
2964 if (fmt[i] == 'e')
2965 {
2966 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
2967 return 1;
2968 }
2969 else if (fmt[i] == 'E')
2970 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
2971 if (loc == &XVECEXP (in, i, j)
2972 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
2973 return 1;
2974 }
2975 return 0;
2976 }
2977
2978 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
2979 and SUBREG_BYTE, return the bit offset where the subreg begins
2980 (counting from the least significant bit of the operand). */
2981
2982 unsigned int
2983 subreg_lsb_1 (enum machine_mode outer_mode,
2984 enum machine_mode inner_mode,
2985 unsigned int subreg_byte)
2986 {
2987 unsigned int bitpos;
2988 unsigned int byte;
2989 unsigned int word;
2990
2991 /* A paradoxical subreg begins at bit position 0. */
2992 if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
2993 return 0;
2994
2995 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
2996 /* If the subreg crosses a word boundary ensure that
2997 it also begins and ends on a word boundary. */
2998 gcc_assert (!((subreg_byte % UNITS_PER_WORD
2999 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3000 && (subreg_byte % UNITS_PER_WORD
3001 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3002
3003 if (WORDS_BIG_ENDIAN)
3004 word = (GET_MODE_SIZE (inner_mode)
3005 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3006 else
3007 word = subreg_byte / UNITS_PER_WORD;
3008 bitpos = word * BITS_PER_WORD;
3009
3010 if (BYTES_BIG_ENDIAN)
3011 byte = (GET_MODE_SIZE (inner_mode)
3012 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3013 else
3014 byte = subreg_byte % UNITS_PER_WORD;
3015 bitpos += byte * BITS_PER_UNIT;
3016
3017 return bitpos;
3018 }
3019
3020 /* Given a subreg X, return the bit offset where the subreg begins
3021 (counting from the least significant bit of the reg). */
3022
3023 unsigned int
3024 subreg_lsb (const_rtx x)
3025 {
3026 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3027 SUBREG_BYTE (x));
3028 }
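
/* Worked example for the two functions above (illustrative only): on a
   little-endian target with 4-byte words, (subreg:SI (reg:DI) 4) has
   word = 4 / UNITS_PER_WORD = 1 and byte = 0, so subreg_lsb returns
   bit 32, the high word.  On a big-endian target the same byte offset
   names the low-order word and subreg_lsb returns 0.  */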
3029
3030 /* Fill in information about a subreg of a hard register.
3031 xregno - A regno of an inner hard subreg_reg (or what will become one).
3032 xmode - The mode of xregno.
3033 offset - The byte offset.
3034 ymode - The mode of a top level SUBREG (or what may become one).
3035 info - Pointer to structure to fill in. */
3036 static void
3037 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3038 unsigned int offset, enum machine_mode ymode,
3039 struct subreg_info *info)
3040 {
3041 int nregs_xmode, nregs_ymode;
3042 int mode_multiple, nregs_multiple;
3043 int offset_adj, y_offset, y_offset_adj;
3044 int regsize_xmode, regsize_ymode;
3045 bool rknown;
3046
3047 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3048
3049 rknown = false;
3050
3051 /* If a non-scalar mode has holes in registers, we expect that it is
3052 made up of its units concatenated together. */
3053 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3054 {
3055 enum machine_mode xmode_unit;
3056
3057 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3058 if (GET_MODE_INNER (xmode) == VOIDmode)
3059 xmode_unit = xmode;
3060 else
3061 xmode_unit = GET_MODE_INNER (xmode);
3062 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3063 gcc_assert (nregs_xmode
3064 == (GET_MODE_NUNITS (xmode)
3065 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3066 gcc_assert (hard_regno_nregs[xregno][xmode]
3067 == (hard_regno_nregs[xregno][xmode_unit]
3068 * GET_MODE_NUNITS (xmode)));
3069
3070 /* You can only ask for a SUBREG of a value with holes in the middle
3071 if you don't cross the holes. (Such a SUBREG should be done by
3072 picking a different register class, or doing it in memory if
3073 necessary.) An example of a value with holes is XCmode on 32-bit
3074 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3075 3 for each part, but in memory it's two 128-bit parts.
3076 Padding is assumed to be at the end (not necessarily the 'high part')
3077 of each unit. */
3078 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3079 < GET_MODE_NUNITS (xmode))
3080 && (offset / GET_MODE_SIZE (xmode_unit)
3081 != ((offset + GET_MODE_SIZE (ymode) - 1)
3082 / GET_MODE_SIZE (xmode_unit))))
3083 {
3084 info->representable_p = false;
3085 rknown = true;
3086 }
3087 }
3088 else
3089 nregs_xmode = hard_regno_nregs[xregno][xmode];
3090
3091 nregs_ymode = hard_regno_nregs[xregno][ymode];
3092
3093 /* Paradoxical subregs are otherwise valid. */
3094 if (!rknown
3095 && offset == 0
3096 && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
3097 {
3098 info->representable_p = true;
3099 /* If this is a big endian paradoxical subreg, which uses more
3100 actual hard registers than the original register, we must
3101 return a negative offset so that we find the proper highpart
3102 of the register. */
3103 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3104 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3105 info->offset = nregs_xmode - nregs_ymode;
3106 else
3107 info->offset = 0;
3108 info->nregs = nregs_ymode;
3109 return;
3110 }
3111
3112 /* If registers store different numbers of bits in the different
3113 modes, we cannot generally form this subreg. */
3114 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3115 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3116 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3117 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3118 {
3119 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3120 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3121 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3122 {
3123 info->representable_p = false;
3124 info->nregs
3125 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3126 info->offset = offset / regsize_xmode;
3127 return;
3128 }
3129 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3130 {
3131 info->representable_p = false;
3132 info->nregs
3133 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3134 info->offset = offset / regsize_xmode;
3135 return;
3136 }
3137 }
3138
3139 /* Lowpart subregs are otherwise valid. */
3140 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3141 {
3142 info->representable_p = true;
3143 rknown = true;
3144
3145 if (offset == 0 || nregs_xmode == nregs_ymode)
3146 {
3147 info->offset = 0;
3148 info->nregs = nregs_ymode;
3149 return;
3150 }
3151 }
3152
3153 /* This should always pass, otherwise we don't know how to verify
3154 the constraint. These conditions may be relaxed but
3155 subreg_regno_offset would need to be redesigned. */
3156 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3157 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3158
3159 /* The XMODE value can be seen as a vector of NREGS_XMODE
3160 values. The subreg must represent a lowpart of a given field.
3161 Compute which field it is. */
3162 offset_adj = offset;
3163 offset_adj -= subreg_lowpart_offset (ymode,
3164 mode_for_size (GET_MODE_BITSIZE (xmode)
3165 / nregs_xmode,
3166 MODE_INT, 0));
3167
3168 /* Size of ymode must not be greater than the size of xmode. */
3169 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3170 gcc_assert (mode_multiple != 0);
3171
3172 y_offset = offset / GET_MODE_SIZE (ymode);
3173 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3174 nregs_multiple = nregs_xmode / nregs_ymode;
3175
3176 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3177 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3178
3179 if (!rknown)
3180 {
3181 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3182 rknown = true;
3183 }
3184 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3185 info->nregs = nregs_ymode;
3186 }
3187
3188 /* This function returns the regno offset of a subreg expression.
3189 xregno - A regno of an inner hard subreg_reg (or what will become one).
3190 xmode - The mode of xregno.
3191 offset - The byte offset.
3192 ymode - The mode of a top level SUBREG (or what may become one).
3193 RETURN - The regno offset which would be used. */
3194 unsigned int
3195 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3196 unsigned int offset, enum machine_mode ymode)
3197 {
3198 struct subreg_info info;
3199 subreg_get_info (xregno, xmode, offset, ymode, &info);
3200 return info.offset;
3201 }
3202
3203 /* This function returns true when the offset is representable via
3204 subreg_regno_offset in the given regno.
3205 xregno - A regno of an inner hard subreg_reg (or what will become one).
3206 xmode - The mode of xregno.
3207 offset - The byte offset.
3208 ymode - The mode of a top level SUBREG (or what may become one).
3209 RETURN - Whether the offset is representable. */
3210 bool
3211 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3212 unsigned int offset, enum machine_mode ymode)
3213 {
3214 struct subreg_info info;
3215 subreg_get_info (xregno, xmode, offset, ymode, &info);
3216 return info.representable_p;
3217 }
3218
3219 /* Return the final regno that a subreg expression refers to. */
3220 unsigned int
3221 subreg_regno (const_rtx x)
3222 {
3223 unsigned int ret;
3224 rtx subreg = SUBREG_REG (x);
3225 int regno = REGNO (subreg);
3226
3227 ret = regno + subreg_regno_offset (regno,
3228 GET_MODE (subreg),
3229 SUBREG_BYTE (x),
3230 GET_MODE (x));
3231 return ret;
3232
3233 }
3234
3235 /* Return the number of registers that a subreg expression refers
3236 to. */
3237 unsigned int
3238 subreg_nregs (const_rtx x)
3239 {
3240 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3241 }
3242
3243 /* Return the number of registers that a subreg expression X, whose
3244 inner register has number REGNO, refers to. This is a variant of
3245 subreg_nregs above in which the regno can be passed in. */
3246
3247 unsigned int
3248 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3249 {
3250 struct subreg_info info;
3251 rtx subreg = SUBREG_REG (x);
3252
3253 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3254 &info);
3255 return info.nregs;
3256 }
3257
3258
3259 struct parms_set_data
3260 {
3261 int nregs;
3262 HARD_REG_SET regs;
3263 };
3264
3265 /* Helper function for noticing stores to parameter registers. */
3266 static void
3267 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3268 {
3269 struct parms_set_data *d = data;
3270 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3271 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3272 {
3273 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3274 d->nregs--;
3275 }
3276 }
3277
3278 /* Look backward for the first parameter to be loaded.
3279 Note that loads of all parameters will not necessarily be
3280 found if CSE has eliminated some of them (e.g., an argument
3281 to the outer function is passed down as a parameter).
3282 Do not skip BOUNDARY. */
3283 rtx
3284 find_first_parameter_load (rtx call_insn, rtx boundary)
3285 {
3286 struct parms_set_data parm;
3287 rtx p, before, first_set;
3288
3289 /* Since different machines initialize their parameter registers
3290 in different orders, assume nothing. Collect the set of all
3291 parameter registers. */
3292 CLEAR_HARD_REG_SET (parm.regs);
3293 parm.nregs = 0;
3294 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3295 if (GET_CODE (XEXP (p, 0)) == USE
3296 && REG_P (XEXP (XEXP (p, 0), 0)))
3297 {
3298 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3299
3300 /* We only care about registers which can hold function
3301 arguments. */
3302 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3303 continue;
3304
3305 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3306 parm.nregs++;
3307 }
3308 before = call_insn;
3309 first_set = call_insn;
3310
3311 /* Search backward for the first set of a register in this set. */
3312 while (parm.nregs && before != boundary)
3313 {
3314 before = PREV_INSN (before);
3315
3316 /* It is possible that some loads got CSEed from one call to
3317 another. Stop in that case. */
3318 if (CALL_P (before))
3319 break;
3320
3321 /* Our caller must either ensure that we will find all sets
3322 (in case the code has not been optimized yet), or guard
3323 against possible labels by setting BOUNDARY to the preceding
3324 CODE_LABEL. */
3325 if (LABEL_P (before))
3326 {
3327 gcc_assert (before == boundary);
3328 break;
3329 }
3330
3331 if (INSN_P (before))
3332 {
3333 int nregs_old = parm.nregs;
3334 note_stores (PATTERN (before), parms_set, &parm);
3335 /* If we found something that did not set a parameter reg,
3336 we're done. Do not keep going, as that might result
3337 in hoisting an insn before the setting of a pseudo
3338 that is used by the hoisted insn. */
3339 if (nregs_old != parm.nregs)
3340 first_set = before;
3341 else
3342 break;
3343 }
3344 }
3345 return first_set;
3346 }
3347
3348 /* Return true if we should avoid inserting code between INSN and the
3349 preceding call instruction. */
3350
3351 bool
3352 keep_with_call_p (const_rtx insn)
3353 {
3354 rtx set;
3355
3356 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3357 {
3358 if (REG_P (SET_DEST (set))
3359 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3360 && fixed_regs[REGNO (SET_DEST (set))]
3361 && general_operand (SET_SRC (set), VOIDmode))
3362 return true;
3363 if (REG_P (SET_SRC (set))
3364 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
3365 && REG_P (SET_DEST (set))
3366 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3367 return true;
3368 /* There may be a stack pop just after the call and before the store
3369 of the return register. Search for the actual store when deciding
3370 if we can break or not. */
3371 if (SET_DEST (set) == stack_pointer_rtx)
3372 {
3373 /* This CONST_CAST is okay because next_nonnote_insn just
3374 returns its argument and we assign it to a const_rtx
3375 variable. */
3376 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3377 if (i2 && keep_with_call_p (i2))
3378 return true;
3379 }
3380 }
3381 return false;
3382 }
3383
3384 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3385 to non-complex jumps. That is, direct unconditional, conditional,
3386 and tablejumps, but not computed jumps or returns. It also does
3387 not apply to the fallthru case of a conditional jump. */
3388
3389 bool
3390 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3391 {
3392 rtx tmp = JUMP_LABEL (jump_insn);
3393
3394 if (label == tmp)
3395 return true;
3396
3397 if (tablejump_p (jump_insn, NULL, &tmp))
3398 {
3399 rtvec vec = XVEC (PATTERN (tmp),
3400 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3401 int i, veclen = GET_NUM_ELEM (vec);
3402
3403 for (i = 0; i < veclen; ++i)
3404 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3405 return true;
3406 }
3407
3408 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3409 return true;
3410
3411 return false;
3412 }
3413
3414 \f
3415 /* Return an estimate of the cost of computing rtx X.
3416 One use is in cse, to decide which expression to keep in the hash table.
3417 Another is in rtl generation, to pick the cheapest way to multiply.
3418 Other uses like the latter are expected in the future. */
3419
3420 int
3421 rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
3422 {
3423 int i, j;
3424 enum rtx_code code;
3425 const char *fmt;
3426 int total;
3427
3428 if (x == 0)
3429 return 0;
3430
3431 /* Compute the default costs of certain things.
3432 Note that targetm.rtx_costs can override the defaults. */
3433
3434 code = GET_CODE (x);
3435 switch (code)
3436 {
3437 case MULT:
3438 total = COSTS_N_INSNS (5);
3439 break;
3440 case DIV:
3441 case UDIV:
3442 case MOD:
3443 case UMOD:
3444 total = COSTS_N_INSNS (7);
3445 break;
3446 case USE:
3447 /* Used in combine.c as a marker. */
3448 total = 0;
3449 break;
3450 default:
3451 total = COSTS_N_INSNS (1);
3452 }
3453
3454 switch (code)
3455 {
3456 case REG:
3457 return 0;
3458
3459 case SUBREG:
3460 total = 0;
3461 /* If we can't tie these modes, make this expensive. The larger
3462 the mode, the more expensive it is. */
3463 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3464 return COSTS_N_INSNS (2
3465 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
3466 break;
3467
3468 default:
3469 if (targetm.rtx_costs (x, code, outer_code, &total))
3470 return total;
3471 break;
3472 }
3473
3474 /* Sum the costs of the sub-rtx's, plus the cost of this operation,
3475 which is already in TOTAL. */
3476
3477 fmt = GET_RTX_FORMAT (code);
3478 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3479 if (fmt[i] == 'e')
3480 total += rtx_cost (XEXP (x, i), code);
3481 else if (fmt[i] == 'E')
3482 for (j = 0; j < XVECLEN (x, i); j++)
3483 total += rtx_cost (XVECEXP (x, i, j), code);
3484
3485 return total;
3486 }
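
/* A minimal sketch (hypothetical helper, not part of GCC proper) of the
   usual rtx_cost pattern: prefer a replacement expression only when it
   is strictly cheaper than the original, as the cse and multiply
   synthesis uses cited in the comment above effectively do.  */
#if 0
static rtx
cheaper_of (rtx a, rtx b, enum rtx_code outer)
{
  return rtx_cost (b, outer) < rtx_cost (a, outer) ? b : a;
}
#endif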
3487 \f
3488 /* Return the cost of address expression X.
3489 Expect that X is a properly formed address reference. */
3490
3491 int
3492 address_cost (rtx x, enum machine_mode mode)
3493 {
3494 /* We may be asked for the cost of various unusual addresses, such as the
3495 operands of a push instruction. It is not worthwhile to complicate
3496 the target hook with such cases. */
3497
3498 if (!memory_address_p (mode, x))
3499 return 1000;
3500
3501 return targetm.address_cost (x);
3502 }
3503
3504 /* If the target doesn't override, compute the cost as with arithmetic. */
3505
3506 int
3507 default_address_cost (rtx x)
3508 {
3509 return rtx_cost (x, MEM);
3510 }
3511 \f
3512
3513 unsigned HOST_WIDE_INT
3514 nonzero_bits (const_rtx x, enum machine_mode mode)
3515 {
3516 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3517 }
3518
3519 unsigned int
3520 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3521 {
3522 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3523 }
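
/* A minimal sketch (hypothetical helper, not part of GCC proper) of a
   typical use of the entry points above: an AND with a constant mask is
   redundant when nonzero_bits proves that every bit cleared by the mask
   is already zero, a test the RTL simplifiers make in essentially this
   form.  */
#if 0
static bool
redundant_and_p (rtx x, unsigned HOST_WIDE_INT mask, enum machine_mode mode)
{
  /* Every possibly-nonzero bit of X lies inside MASK, so
     (and:MODE X MASK) must equal X.  */
  return (nonzero_bits (x, mode) & ~mask) == 0;
}
#endif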
3524
3525 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3526 It avoids exponential behavior in nonzero_bits1 when X has
3527 identical subexpressions on the first or the second level. */
3528
3529 static unsigned HOST_WIDE_INT
3530 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3531 enum machine_mode known_mode,
3532 unsigned HOST_WIDE_INT known_ret)
3533 {
3534 if (x == known_x && mode == known_mode)
3535 return known_ret;
3536
3537 /* Try to find identical subexpressions. If found call
3538 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3539 precomputed value for the subexpression as KNOWN_RET. */
3540
3541 if (ARITHMETIC_P (x))
3542 {
3543 rtx x0 = XEXP (x, 0);
3544 rtx x1 = XEXP (x, 1);
3545
3546 /* Check the first level. */
3547 if (x0 == x1)
3548 return nonzero_bits1 (x, mode, x0, mode,
3549 cached_nonzero_bits (x0, mode, known_x,
3550 known_mode, known_ret));
3551
3552 /* Check the second level. */
3553 if (ARITHMETIC_P (x0)
3554 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3555 return nonzero_bits1 (x, mode, x1, mode,
3556 cached_nonzero_bits (x1, mode, known_x,
3557 known_mode, known_ret));
3558
3559 if (ARITHMETIC_P (x1)
3560 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3561 return nonzero_bits1 (x, mode, x0, mode,
3562 cached_nonzero_bits (x0, mode, known_x,
3563 known_mode, known_ret));
3564 }
3565
3566 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3567 }
3568
3569 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3570 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3571 is less useful. We can't allow both, because that results in exponential
3572 run time recursion. There is a nullstone testcase that triggered
3573 this. This macro avoids accidental uses of num_sign_bit_copies. */
3574 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3575
3576 /* Given an expression, X, compute which bits in X can be nonzero.
3577 We don't care about bits outside of those defined in MODE.
3578
3579 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
3580 an arithmetic operation, we can do better. */
3581
3582 static unsigned HOST_WIDE_INT
3583 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3584 enum machine_mode known_mode,
3585 unsigned HOST_WIDE_INT known_ret)
3586 {
3587 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3588 unsigned HOST_WIDE_INT inner_nz;
3589 enum rtx_code code;
3590 unsigned int mode_width = GET_MODE_BITSIZE (mode);
3591
3592 /* For floating-point values, assume all bits are needed. */
3593 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
3594 return nonzero;
3595
3596 /* If X is wider than MODE, use its mode instead. */
3597 if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
3598 {
3599 mode = GET_MODE (x);
3600 nonzero = GET_MODE_MASK (mode);
3601 mode_width = GET_MODE_BITSIZE (mode);
3602 }
3603
3604 if (mode_width > HOST_BITS_PER_WIDE_INT)
3605 /* Our only callers in this case look for single bit values. So
3606 just return the mode mask. Those tests will then be false. */
3607 return nonzero;
3608
3609 #ifndef WORD_REGISTER_OPERATIONS
3610 /* If MODE is wider than X, but both are a single word for both the host
3611 and target machines, we can compute this from which bits of the
3612 object might be nonzero in its own mode, taking into account the fact
3613 that on many CISC machines, accessing an object in a wider mode
3614 causes the high-order bits to become undefined. So they are
3615 not known to be zero. */
3616
3617 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3618 && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
3619 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3620 && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
3621 {
3622 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3623 known_x, known_mode, known_ret);
3624 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3625 return nonzero;
3626 }
3627 #endif
3628
3629 code = GET_CODE (x);
3630 switch (code)
3631 {
3632 case REG:
3633 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3634 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3635 all the bits above ptr_mode are known to be zero. */
3636 if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3637 && REG_POINTER (x))
3638 nonzero &= GET_MODE_MASK (ptr_mode);
3639 #endif
3640
3641 /* Include declared information about alignment of pointers. */
3642 /* ??? We don't properly preserve REG_POINTER changes across
3643 pointer-to-integer casts, so we can't trust it except for
3644 things that we know must be pointers. See execute/960116-1.c. */
3645 if ((x == stack_pointer_rtx
3646 || x == frame_pointer_rtx
3647 || x == arg_pointer_rtx)
3648 && REGNO_POINTER_ALIGN (REGNO (x)))
3649 {
3650 unsigned HOST_WIDE_INT alignment
3651 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3652
3653 #ifdef PUSH_ROUNDING
3654 /* If PUSH_ROUNDING is defined, it is possible for the
3655 stack to be momentarily aligned only to that amount,
3656 so we pick the least alignment. */
3657 if (x == stack_pointer_rtx && PUSH_ARGS)
3658 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
3659 alignment);
3660 #endif
3661
3662 nonzero &= ~(alignment - 1);
3663 }
3664
3665 {
3666 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
3667 rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
3668 known_mode, known_ret,
3669 &nonzero_for_hook);
3670
3671 if (new)
3672 nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
3673 known_mode, known_ret);
3674
3675 return nonzero_for_hook;
3676 }
3677
3678 case CONST_INT:
3679 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3680 /* If X is negative in MODE, sign-extend the value. */
3681 if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
3682 && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
3683 return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
3684 #endif
3685
3686 return INTVAL (x);
3687
3688 case MEM:
3689 #ifdef LOAD_EXTEND_OP
3690 /* In many, if not most, RISC machines, reading a byte from memory
3691 zeros the rest of the register. Noticing that fact saves a lot
3692 of extra zero-extends. */
3693 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
3694 nonzero &= GET_MODE_MASK (GET_MODE (x));
3695 #endif
3696 break;
3697
3698 case EQ: case NE:
3699 case UNEQ: case LTGT:
3700 case GT: case GTU: case UNGT:
3701 case LT: case LTU: case UNLT:
3702 case GE: case GEU: case UNGE:
3703 case LE: case LEU: case UNLE:
3704 case UNORDERED: case ORDERED:
3705 /* If this produces an integer result, we know which bits are set.
3706 Code here used to clear bits outside the mode of X, but that is
3707 now done above. */
3708 /* Mind that MODE is the mode the caller wants to look at this
3709 operation in, and not the actual operation mode. We can wind
3710 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3711 that describes the results of a vector compare. */
3712 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3713 && mode_width <= HOST_BITS_PER_WIDE_INT)
3714 nonzero = STORE_FLAG_VALUE;
3715 break;
3716
3717 case NEG:
3718 #if 0
3719 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3720 and num_sign_bit_copies. */
3721 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3722 == GET_MODE_BITSIZE (GET_MODE (x)))
3723 nonzero = 1;
3724 #endif
3725
3726 if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
3727 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
3728 break;
3729
3730 case ABS:
3731 #if 0
3732 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3733 and num_sign_bit_copies. */
3734 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3735 == GET_MODE_BITSIZE (GET_MODE (x)))
3736 nonzero = 1;
3737 #endif
3738 break;
3739
3740 case TRUNCATE:
3741 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
3742 known_x, known_mode, known_ret)
3743 & GET_MODE_MASK (mode));
3744 break;
3745
3746 case ZERO_EXTEND:
3747 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3748 known_x, known_mode, known_ret);
3749 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3750 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3751 break;
3752
3753 case SIGN_EXTEND:
3754 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3755 Otherwise, show that all the bits in the outer mode but not in the
3756 inner mode may be nonzero. */
3757 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
3758 known_x, known_mode, known_ret);
3759 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3760 {
3761 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3762 if (inner_nz
3763 & (((HOST_WIDE_INT) 1
3764 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
3765 inner_nz |= (GET_MODE_MASK (mode)
3766 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
3767 }
3768
3769 nonzero &= inner_nz;
3770 break;
3771
3772 case AND:
3773 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3774 known_x, known_mode, known_ret)
3775 & cached_nonzero_bits (XEXP (x, 1), mode,
3776 known_x, known_mode, known_ret);
3777 break;
3778
3779 case XOR: case IOR:
3780 case UMIN: case UMAX: case SMIN: case SMAX:
3781 {
3782 unsigned HOST_WIDE_INT nonzero0 =
3783 cached_nonzero_bits (XEXP (x, 0), mode,
3784 known_x, known_mode, known_ret);
3785
3786 /* Don't call nonzero_bits a second time if it cannot change
3787 anything. */
3788 if ((nonzero & nonzero0) != nonzero)
3789 nonzero &= nonzero0
3790 | cached_nonzero_bits (XEXP (x, 1), mode,
3791 known_x, known_mode, known_ret);
3792 }
3793 break;
3794
3795 case PLUS: case MINUS:
3796 case MULT:
3797 case DIV: case UDIV:
3798 case MOD: case UMOD:
3799 /* We can apply the rules of arithmetic to compute the number of
3800 high- and low-order zero bits of these operations. We start by
3801 computing the width (position of the highest-order nonzero bit)
3802 and the number of low-order zero bits for each value. */
3803 {
3804 unsigned HOST_WIDE_INT nz0 =
3805 cached_nonzero_bits (XEXP (x, 0), mode,
3806 known_x, known_mode, known_ret);
3807 unsigned HOST_WIDE_INT nz1 =
3808 cached_nonzero_bits (XEXP (x, 1), mode,
3809 known_x, known_mode, known_ret);
3810 int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
3811 int width0 = floor_log2 (nz0) + 1;
3812 int width1 = floor_log2 (nz1) + 1;
3813 int low0 = floor_log2 (nz0 & -nz0);
3814 int low1 = floor_log2 (nz1 & -nz1);
3815 HOST_WIDE_INT op0_maybe_minusp
3816 = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
3817 HOST_WIDE_INT op1_maybe_minusp
3818 = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
3819 unsigned int result_width = mode_width;
3820 int result_low = 0;
3821
3822 switch (code)
3823 {
3824 case PLUS:
3825 result_width = MAX (width0, width1) + 1;
3826 result_low = MIN (low0, low1);
3827 break;
3828 case MINUS:
3829 result_low = MIN (low0, low1);
3830 break;
3831 case MULT:
3832 result_width = width0 + width1;
3833 result_low = low0 + low1;
3834 break;
3835 case DIV:
3836 if (width1 == 0)
3837 break;
3838 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3839 result_width = width0;
3840 break;
3841 case UDIV:
3842 if (width1 == 0)
3843 break;
3844 result_width = width0;
3845 break;
3846 case MOD:
3847 if (width1 == 0)
3848 break;
3849 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3850 result_width = MIN (width0, width1);
3851 result_low = MIN (low0, low1);
3852 break;
3853 case UMOD:
3854 if (width1 == 0)
3855 break;
3856 result_width = MIN (width0, width1);
3857 result_low = MIN (low0, low1);
3858 break;
3859 default:
3860 gcc_unreachable ();
3861 }
3862
3863 if (result_width < mode_width)
3864 nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
3865
3866 if (result_low > 0)
3867 nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
3868
3869 #ifdef POINTERS_EXTEND_UNSIGNED
3870 /* If pointers extend unsigned and this is an addition or subtraction
3871 to a pointer in Pmode, all the bits above ptr_mode are known to be
3872 zero. */
3873 if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
3874 && (code == PLUS || code == MINUS)
3875 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
3876 nonzero &= GET_MODE_MASK (ptr_mode);
3877 #endif
3878 }
3879 break;
3880
3881 case ZERO_EXTRACT:
3882 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3883 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3884 nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
3885 break;
3886
3887 case SUBREG:
3888 /* If this is a SUBREG formed for a promoted variable that has
3889 been zero-extended, we know that at least the high-order bits
3890 are zero, though others might be too. */
3891
3892 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
3893 nonzero = GET_MODE_MASK (GET_MODE (x))
3894 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
3895 known_x, known_mode, known_ret);
3896
3897 /* If the inner mode is a single word for both the host and target
3898 machines, we can compute this from which bits of the inner
3899 object might be nonzero. */
3900 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
3901 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
3902 <= HOST_BITS_PER_WIDE_INT))
3903 {
3904 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
3905 known_x, known_mode, known_ret);
3906
3907 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
3908 /* If this is a typical RISC machine, we only have to worry
3909 about the way loads are extended. */
3910 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
3911 ? (((nonzero
3912 & (((unsigned HOST_WIDE_INT) 1
3913 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
3914 != 0))
3915 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
3916 || !MEM_P (SUBREG_REG (x)))
3917 #endif
3918 {
3919 /* On many CISC machines, accessing an object in a wider mode
3920 causes the high-order bits to become undefined. So they are
3921 not known to be zero. */
3922 if (GET_MODE_SIZE (GET_MODE (x))
3923 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
3924 nonzero |= (GET_MODE_MASK (GET_MODE (x))
3925 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
3926 }
3927 }
3928 break;
3929
3930 case ASHIFTRT:
3931 case LSHIFTRT:
3932 case ASHIFT:
3933 case ROTATE:
3934 /* The nonzero bits are in two classes: any bits within MODE
3935 that aren't in GET_MODE (x) are always significant. The rest of the
3936 nonzero bits are those that are significant in the operand of
3937 the shift when shifted the appropriate number of bits. This
3938 shows that high-order bits are cleared by the right shift and
3939 low-order bits by left shifts. */
3940 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3941 && INTVAL (XEXP (x, 1)) >= 0
3942 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3943 {
3944 enum machine_mode inner_mode = GET_MODE (x);
3945 unsigned int width = GET_MODE_BITSIZE (inner_mode);
3946 int count = INTVAL (XEXP (x, 1));
3947 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
3948 unsigned HOST_WIDE_INT op_nonzero =
3949 cached_nonzero_bits (XEXP (x, 0), mode,
3950 known_x, known_mode, known_ret);
3951 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
3952 unsigned HOST_WIDE_INT outer = 0;
3953
3954 if (mode_width > width)
3955 outer = (op_nonzero & nonzero & ~mode_mask);
3956
3957 if (code == LSHIFTRT)
3958 inner >>= count;
3959 else if (code == ASHIFTRT)
3960 {
3961 inner >>= count;
3962
3963 /* If the sign bit may have been nonzero before the shift, we
3964 need to mark all the places it could have been copied to
3965 by the shift as possibly nonzero. */
3966 if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
3967 inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
3968 }
3969 else if (code == ASHIFT)
3970 inner <<= count;
3971 else
3972 inner = ((inner << (count % width)
3973 | (inner >> (width - (count % width)))) & mode_mask);
3974
3975 nonzero &= (outer | inner);
3976 }
3977 break;
3978
3979 case FFS:
3980 case POPCOUNT:
3981 /* This is at most the number of bits in the mode. */
3982 nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
3983 break;
3984
3985 case CLZ:
3986 /* If CLZ has a known value at zero, then the nonzero bits are
3987 that value, plus the number of bits in the mode minus one. */
3988 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3989 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3990 else
3991 nonzero = -1;
3992 break;
3993
3994 case CTZ:
3995 /* If CTZ has a known value at zero, then the nonzero bits are
3996 that value, plus the number of bits in the mode minus one. */
3997 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3998 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3999 else
4000 nonzero = -1;
4001 break;
4002
4003 case PARITY:
4004 nonzero = 1;
4005 break;
4006
4007 case IF_THEN_ELSE:
4008 {
4009 unsigned HOST_WIDE_INT nonzero_true =
4010 cached_nonzero_bits (XEXP (x, 1), mode,
4011 known_x, known_mode, known_ret);
4012
4013 /* Don't call nonzero_bits a second time if it cannot change
4014 anything. */
4015 if ((nonzero & nonzero_true) != nonzero)
4016 nonzero &= nonzero_true
4017 | cached_nonzero_bits (XEXP (x, 2), mode,
4018 known_x, known_mode, known_ret);
4019 }
4020 break;
4021
4022 default:
4023 break;
4024 }
4025
4026 return nonzero;
4027 }
4028
4029 /* See the macro definition above. */
4030 #undef cached_num_sign_bit_copies
4031
4032 \f
4033 /* The function cached_num_sign_bit_copies is a wrapper around
4034 num_sign_bit_copies1. It avoids exponential behavior in
4035 num_sign_bit_copies1 when X has identical subexpressions on the
4036 first or the second level. */
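/* For instance, with X == (plus (reg 1) (reg 1)) the count for
(reg 1) is computed once and passed down as KNOWN_X/KNOWN_RET, so
the recursion does not evaluate the shared operand twice. */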
4037
4038 static unsigned int
4039 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4040 enum machine_mode known_mode,
4041 unsigned int known_ret)
4042 {
4043 if (x == known_x && mode == known_mode)
4044 return known_ret;
4045
4046 /* Try to find identical subexpressions. If found call
4047 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4048 the precomputed value for the subexpression as KNOWN_RET. */
4049
4050 if (ARITHMETIC_P (x))
4051 {
4052 rtx x0 = XEXP (x, 0);
4053 rtx x1 = XEXP (x, 1);
4054
4055 /* Check the first level. */
4056 if (x0 == x1)
4057 return
4058 num_sign_bit_copies1 (x, mode, x0, mode,
4059 cached_num_sign_bit_copies (x0, mode, known_x,
4060 known_mode,
4061 known_ret));
4062
4063 /* Check the second level. */
4064 if (ARITHMETIC_P (x0)
4065 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4066 return
4067 num_sign_bit_copies1 (x, mode, x1, mode,
4068 cached_num_sign_bit_copies (x1, mode, known_x,
4069 known_mode,
4070 known_ret));
4071
4072 if (ARITHMETIC_P (x1)
4073 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4074 return
4075 num_sign_bit_copies1 (x, mode, x0, mode,
4076 cached_num_sign_bit_copies (x0, mode, known_x,
4077 known_mode,
4078 known_ret));
4079 }
4080
4081 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4082 }
4083
4084 /* Return the number of bits at the high-order end of X that are known to
4085 be equal to the sign bit. X will be used in mode MODE; if MODE is
4086 VOIDmode, X will be used in its own mode. The returned value will always
4087 be between 1 and the number of bits in MODE. */
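/* The count includes the sign bit itself, which is why the result is
never less than 1: even a completely unknown value has one "copy",
the sign bit alone. E.g. in an 8-bit mode, (const_int -1) has 8
copies and (const_int 3) has 6, since bits 7..2 all match the zero
sign bit. */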
4088
4089 static unsigned int
4090 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4091 enum machine_mode known_mode,
4092 unsigned int known_ret)
4093 {
4094 enum rtx_code code = GET_CODE (x);
4095 unsigned int bitwidth = GET_MODE_BITSIZE (mode);
4096 int num0, num1, result;
4097 unsigned HOST_WIDE_INT nonzero;
4098
4099 /* If we weren't given a mode, use the mode of X. If the mode is still
4100 VOIDmode, we don't know anything. Likewise if one of the modes is
4101 floating-point. */
4102
4103 if (mode == VOIDmode)
4104 mode = GET_MODE (x);
4105
4106 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
4107 return 1;
4108
4109 /* For a smaller object, just ignore the high bits. */
4110 if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
4111 {
4112 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4113 known_x, known_mode, known_ret);
4114 return MAX (1,
4115 num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
4116 }
4117
4118 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
4119 {
4120 #ifndef WORD_REGISTER_OPERATIONS
4121 /* If this machine does not do all register operations on the entire
4122 register and MODE is wider than the mode of X, we can say nothing
4123 at all about the high-order bits. */
4124 return 1;
4125 #else
4126 /* Likewise on machines that do, if the mode of the object is smaller
4127 than a word and loads of that size don't sign extend, we can say
4128 nothing about the high order bits. */
4129 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
4130 #ifdef LOAD_EXTEND_OP
4131 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4132 #endif
4133 )
4134 return 1;
4135 #endif
4136 }
4137
4138 switch (code)
4139 {
4140 case REG:
4141
4142 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4143 /* If pointers extend signed and this is a pointer in Pmode, say that
4144 all the bits above ptr_mode are known to be sign bit copies. */
4145 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
4146 && REG_POINTER (x))
4147 return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
4148 #endif
4149
4150 {
4151 unsigned int copies_for_hook = 1, copies = 1;
4152 rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4153 known_mode, known_ret,
4154 &copies_for_hook);
4155
4156 if (new)
4157 copies = cached_num_sign_bit_copies (new, mode, known_x,
4158 known_mode, known_ret);
4159
4160 if (copies > 1 || copies_for_hook > 1)
4161 return MAX (copies, copies_for_hook);
4162
4163 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4164 }
4165 break;
4166
4167 case MEM:
4168 #ifdef LOAD_EXTEND_OP
4169 /* Some RISC machines sign-extend all loads smaller than a word. */
4170 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4171 return MAX (1, ((int) bitwidth
4172 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
4173 #endif
4174 break;
4175
4176 case CONST_INT:
4177 /* If the constant is negative, take its 1's complement and remask.
4178 Then see how many zero bits we have. */
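/* Worked example: in an 8-bit mode with X == (const_int -4), nonzero
starts as 0xfc; the sign bit is set, so nonzero becomes
~0xfc & 0xff == 0x03 and the result is 8 - floor_log2 (3) - 1 == 6
sign-bit copies. */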
4179 nonzero = INTVAL (x) & GET_MODE_MASK (mode);
4180 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4181 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4182 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4183
4184 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4185
4186 case SUBREG:
4187 /* If this is a SUBREG for a promoted object that is sign-extended
4188 and we are looking at it in a wider mode, we know that at least the
4189 high-order bits are known to be sign bit copies. */
4190
4191 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4192 {
4193 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4194 known_x, known_mode, known_ret);
4195 return MAX ((int) bitwidth
4196 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
4197 num0);
4198 }
4199
4200 /* For a smaller object, just ignore the high bits. */
4201 if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
4202 {
4203 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4204 known_x, known_mode, known_ret);
4205 return MAX (1, (num0
4206 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
4207 - bitwidth)));
4208 }
4209
4210 #ifdef WORD_REGISTER_OPERATIONS
4211 #ifdef LOAD_EXTEND_OP
4212 /* For paradoxical SUBREGs on machines where all register operations
4213 affect the entire register, just look inside. Note that we are
4214 passing MODE to the recursive call, so the number of sign bit copies
4215 will remain relative to that mode, not the inner mode. */
4216
4217 /* This works only if loads sign extend. Otherwise, if we get a
4218 reload for the inner part, it may be loaded from the stack, and
4219 then we lose all sign bit copies that existed before the store
4220 to the stack. */
4221
4222 if ((GET_MODE_SIZE (GET_MODE (x))
4223 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4224 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4225 && MEM_P (SUBREG_REG (x)))
4226 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4227 known_x, known_mode, known_ret);
4228 #endif
4229 #endif
4230 break;
4231
4232 case SIGN_EXTRACT:
4233 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4234 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4235 break;
4236
4237 case SIGN_EXTEND:
4238 return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4239 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4240 known_x, known_mode, known_ret));
4241
4242 case TRUNCATE:
4243 /* For a smaller object, just ignore the high bits. */
4244 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4245 known_x, known_mode, known_ret);
4246 return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4247 - bitwidth)));
4248
4249 case NOT:
4250 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4251 known_x, known_mode, known_ret);
4252
4253 case ROTATE: case ROTATERT:
4254 /* If we are rotating left by a number of bits less than the number
4255 of sign bit copies, we can just subtract that amount from the
4256 number. */
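/* E.g. in an 8-bit mode, 0xf0 has four sign-bit copies; after
(rotate x 2) it becomes 0xc3, leaving 4 - 2 == 2 copies. */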
4257 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4258 && INTVAL (XEXP (x, 1)) >= 0
4259 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4260 {
4261 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4262 known_x, known_mode, known_ret);
4263 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4264 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4265 }
4266 break;
4267
4268 case NEG:
4269 /* In general, this subtracts one sign bit copy. But if the value
4270 is known to be positive, the number of sign bit copies is the
4271 same as that of the input. Finally, if the input has just one bit
4272 that might be nonzero, all the bits are copies of the sign bit. */
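/* E.g. in an 8-bit mode, -64 (0xc0) has two sign-bit copies, but
(neg -64) == 64 (0x40) has only one, so in general a copy must be
given up. If the operand can only be 0 or 1 (nonzero_bits == 1),
the negation is 0 or -1 and all 8 bits copy the sign bit. */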
4273 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4274 known_x, known_mode, known_ret);
4275 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4276 return num0 > 1 ? num0 - 1 : 1;
4277
4278 nonzero = nonzero_bits (XEXP (x, 0), mode);
4279 if (nonzero == 1)
4280 return bitwidth;
4281
4282 if (num0 > 1
4283 && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4284 num0--;
4285
4286 return num0;
4287
4288 case IOR: case AND: case XOR:
4289 case SMIN: case SMAX: case UMIN: case UMAX:
4290 /* Logical operations will preserve the number of sign-bit copies.
4291 MIN and MAX operations always return one of the operands. */
4292 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4293 known_x, known_mode, known_ret);
4294 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4295 known_x, known_mode, known_ret);
4296
4297 /* If the second operand clears some of the top bits, then regardless
4298 of the other term we are guaranteed at least that many high-order
4299 zero bits, and hence NUM1 sign-bit copies. */
4300 if (code == AND
4301 && num1 > 1
4302 && bitwidth <= HOST_BITS_PER_WIDE_INT
4303 && GET_CODE (XEXP (x, 1)) == CONST_INT
4304 && !(INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
4305 return num1;
4306
4307 /* Similarly for IOR when setting high-order bits. */
4308 if (code == IOR
4309 && num1 > 1
4310 && bitwidth <= HOST_BITS_PER_WIDE_INT
4311 && GET_CODE (XEXP (x, 1)) == CONST_INT
4312 && (INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
4313 return num1;
4314
4315 return MIN (num0, num1);
4316
4317 case PLUS: case MINUS:
4318 /* For addition and subtraction, we can have a 1-bit carry. However,
4319 if we are subtracting 1 from a positive number, there will not
4320 be such a carry. Furthermore, if the positive number is known to
4321 be 0 or 1, we know the result is either -1 or 0. */
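/* E.g. in an 8-bit mode, 63 has two sign-bit copies but 63 + 63 ==
126 (0x7e) has only one: the carry consumed a copy. For
(plus x (const_int -1)) with X known to be 0 or 1, the sum is -1 or
0, so every bit is a sign-bit copy. */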
4322
4323 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4324 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4325 {
4326 nonzero = nonzero_bits (XEXP (x, 0), mode);
4327 if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4328 return (nonzero == 1 || nonzero == 0 ? bitwidth
4329 : bitwidth - floor_log2 (nonzero) - 1);
4330 }
4331
4332 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4333 known_x, known_mode, known_ret);
4334 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4335 known_x, known_mode, known_ret);
4336 result = MAX (1, MIN (num0, num1) - 1);
4337
4338 #ifdef POINTERS_EXTEND_UNSIGNED
4339 /* If pointers extend signed and this is an addition or subtraction
4340 to a pointer in Pmode, all the bits above ptr_mode are known to be
4341 sign bit copies. */
4342 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4343 && (code == PLUS || code == MINUS)
4344 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
4345 result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
4346 - GET_MODE_BITSIZE (ptr_mode) + 1),
4347 result);
4348 #endif
4349 return result;
4350
4351 case MULT:
4352 /* The number of bits of the product is the sum of the number of
4353 bits of both terms. However, unless one of the terms is known
4354 to be positive, we must allow for an additional bit since negating
4355 a negative number can remove one sign bit copy. */
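/* E.g. in an 8-bit mode, operands confined to [-8, 7] each have five
sign-bit copies, giving 5 + 5 - 8 == 2 for the product -- except
that (-8) * (-8) == 64 (0x40) has only one, hence the extra bit
when both operands may be negative. */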
4356
4357 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4358 known_x, known_mode, known_ret);
4359 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4360 known_x, known_mode, known_ret);
4361
4362 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4363 if (result > 0
4364 && (bitwidth > HOST_BITS_PER_WIDE_INT
4365 || (((nonzero_bits (XEXP (x, 0), mode)
4366 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4367 && ((nonzero_bits (XEXP (x, 1), mode)
4368 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
4369 result--;
4370
4371 return MAX (1, result);
4372
4373 case UDIV:
4374 /* The result must be <= the first operand. If the first operand
4375 has the high bit set, we know nothing about the number of sign
4376 bit copies. */
4377 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4378 return 1;
4379 else if ((nonzero_bits (XEXP (x, 0), mode)
4380 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4381 return 1;
4382 else
4383 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4384 known_x, known_mode, known_ret);
4385
4386 case UMOD:
4387 /* The result must be <= the second operand. */
4388 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4389 known_x, known_mode, known_ret);
4390
4391 case DIV:
4392 /* Similar to unsigned division, except that we have to worry about
4393 the case where the divisor is negative, in which case the quotient
4394 is negated and can lose one sign bit copy. */
4395 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4396 known_x, known_mode, known_ret);
4397 if (result > 1
4398 && (bitwidth > HOST_BITS_PER_WIDE_INT
4399 || (nonzero_bits (XEXP (x, 1), mode)
4400 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4401 result--;
4402
4403 return result;
4404
4405 case MOD:
4406 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4407 known_x, known_mode, known_ret);
4408 if (result > 1
4409 && (bitwidth > HOST_BITS_PER_WIDE_INT
4410 || (nonzero_bits (XEXP (x, 1), mode)
4411 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4412 result--;
4413
4414 return result;
4415
4416 case ASHIFTRT:
4417 /* An arithmetic right shift by a constant increases the number of
4418 bits equal to the sign bit by the shift count. */
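/* E.g. (ashiftrt x 3) turns an operand with two known sign-bit
copies into a result with five, capped at the mode width. */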
4419 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4420 known_x, known_mode, known_ret);
4421 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4422 && INTVAL (XEXP (x, 1)) > 0)
4423 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4424
4425 return num0;
4426
4427 case ASHIFT:
4428 /* Left shifts destroy copies. */
4429 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4430 || INTVAL (XEXP (x, 1)) < 0
4431 || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
4432 return 1;
4433
4434 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4435 known_x, known_mode, known_ret);
4436 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4437
4438 case IF_THEN_ELSE:
4439 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4440 known_x, known_mode, known_ret);
4441 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4442 known_x, known_mode, known_ret);
4443 return MIN (num0, num1);
4444
4445 case EQ: case NE: case GE: case GT: case LE: case LT:
4446 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4447 case GEU: case GTU: case LEU: case LTU:
4448 case UNORDERED: case ORDERED:
4449 /* The value of a comparison is STORE_FLAG_VALUE. If it is negative,
4450 take its 1's complement and remask. Then see how many zero bits we have. */
4451 nonzero = STORE_FLAG_VALUE;
4452 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4453 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4454 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4455
4456 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4457
4458 default:
4459 break;
4460 }
4461
4462 /* If we haven't been able to figure it out by one of the above rules,
4463 see if some of the high-order bits are known to be zero. If so, the
4464 number of sign-bit copies is the count of those zero bits. If we
4465 can't safely compute the mask for this mode, give up and return 1. */
4466
4467 bitwidth = GET_MODE_BITSIZE (mode);
4468 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4469 return 1;
4470
4471 nonzero = nonzero_bits (x, mode);
4472 return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
4473 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4474 }
4475
4476 /* Calculate the rtx_cost of a single instruction. A return value of
4477 zero indicates an instruction pattern without a known cost. */
4478
4479 int
4480 insn_rtx_cost (rtx pat)
4481 {
4482 int i, cost;
4483 rtx set;
4484
4485 /* Extract the single set rtx from the instruction pattern.
4486 We can't use single_set since we only have the pattern. */
4487 if (GET_CODE (pat) == SET)
4488 set = pat;
4489 else if (GET_CODE (pat) == PARALLEL)
4490 {
4491 set = NULL_RTX;
4492 for (i = 0; i < XVECLEN (pat, 0); i++)
4493 {
4494 rtx x = XVECEXP (pat, 0, i);
4495 if (GET_CODE (x) == SET)
4496 {
4497 if (set)
4498 return 0;
4499 set = x;
4500 }
4501 }
4502 if (!set)
4503 return 0;
4504 }
4505 else
4506 return 0;
4507
4508 cost = rtx_cost (SET_SRC (set), SET);
4509 return cost > 0 ? cost : COSTS_N_INSNS (1);
4510 }
4511
4512 /* Given an insn INSN and condition COND, return the condition in a
4513 canonical form to simplify testing by callers. Specifically:
4514
4515 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4516 (2) Both operands will be machine operands; (cc0) will have been replaced.
4517 (3) If an operand is a constant, it will be the second operand.
4518 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4519 for GE, GEU, and LEU.
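For instance, (LE x 4) is returned as (LT x 5), and (GEU x 1) as
(GTU x 0).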
4520
4521 If the condition cannot be understood, or is an inequality floating-point
4522 comparison which needs to be reversed, 0 will be returned.
4523
4524 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4525
4526 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4527 insn used in locating the condition was found. If a replacement test
4528 of the condition is desired, it should be placed in front of that
4529 insn and we will be sure that the inputs are still valid.
4530
4531 If WANT_REG is nonzero, we wish the condition to be relative to that
4532 register, if possible. Therefore, do not canonicalize the condition
4533 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4534 to be a compare to a CC mode register.
4535
4536 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4537 and at INSN. */
4538
4539 rtx
4540 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4541 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4542 {
4543 enum rtx_code code;
4544 rtx prev = insn;
4545 const_rtx set;
4546 rtx tem;
4547 rtx op0, op1;
4548 int reverse_code = 0;
4549 enum machine_mode mode;
4550 basic_block bb = BLOCK_FOR_INSN (insn);
4551
4552 code = GET_CODE (cond);
4553 mode = GET_MODE (cond);
4554 op0 = XEXP (cond, 0);
4555 op1 = XEXP (cond, 1);
4556
4557 if (reverse)
4558 code = reversed_comparison_code (cond, insn);
4559 if (code == UNKNOWN)
4560 return 0;
4561
4562 if (earliest)
4563 *earliest = insn;
4564
4565 /* If we are comparing a register with zero, see if the register is set
4566 in the previous insn to a COMPARE or a comparison operation. Perform
4567 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4568 in cse.c. */
4569
4570 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4571 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4572 && op1 == CONST0_RTX (GET_MODE (op0))
4573 && op0 != want_reg)
4574 {
4575 /* Set nonzero when we find something of interest. */
4576 rtx x = 0;
4577
4578 #ifdef HAVE_cc0
4579 /* If comparison with cc0, import actual comparison from compare
4580 insn. */
4581 if (op0 == cc0_rtx)
4582 {
4583 if ((prev = prev_nonnote_insn (prev)) == 0
4584 || !NONJUMP_INSN_P (prev)
4585 || (set = single_set (prev)) == 0
4586 || SET_DEST (set) != cc0_rtx)
4587 return 0;
4588
4589 op0 = SET_SRC (set);
4590 op1 = CONST0_RTX (GET_MODE (op0));
4591 if (earliest)
4592 *earliest = prev;
4593 }
4594 #endif
4595
4596 /* If this is a COMPARE, pick up the two things being compared. */
4597 if (GET_CODE (op0) == COMPARE)
4598 {
4599 op1 = XEXP (op0, 1);
4600 op0 = XEXP (op0, 0);
4601 continue;
4602 }
4603 else if (!REG_P (op0))
4604 break;
4605
4606 /* Go back to the previous insn. Stop if it is not an INSN. We also
4607 stop if it isn't a single set or if it has a REG_INC note because
4608 we don't want to bother dealing with it. */
4609
4610 if ((prev = prev_nonnote_insn (prev)) == 0
4611 || !NONJUMP_INSN_P (prev)
4612 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4613 /* In cfglayout mode, there do not have to be labels at the
4614 beginning of a block, or jumps at the end, so the previous
4615 conditions would not stop us when we reach the bb boundary. */
4616 || BLOCK_FOR_INSN (prev) != bb)
4617 break;
4618
4619 set = set_of (op0, prev);
4620
4621 if (set
4622 && (GET_CODE (set) != SET
4623 || !rtx_equal_p (SET_DEST (set), op0)))
4624 break;
4625
4626 /* If this is setting OP0, get what it sets it to if it looks
4627 relevant. */
4628 if (set)
4629 {
4630 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4631 #ifdef FLOAT_STORE_FLAG_VALUE
4632 REAL_VALUE_TYPE fsfv;
4633 #endif
4634
4635 /* ??? We may not combine comparisons done in a CCmode with
4636 comparisons not done in a CCmode. This is to aid targets
4637 like Alpha that have an IEEE compliant EQ instruction, and
4638 a non-IEEE compliant BEQ instruction. The use of CCmode is
4639 actually artificial, simply to prevent the combination, but
4640 should not affect other platforms.
4641
4642 However, we must allow VOIDmode comparisons to match either
4643 CCmode or non-CCmode comparison, because some ports have
4644 modeless comparisons inside branch patterns.
4645
4646 ??? This mode check should perhaps look more like the mode check
4647 in simplify_comparison in combine. */
4648
4649 if ((GET_CODE (SET_SRC (set)) == COMPARE
4650 || (((code == NE
4651 || (code == LT
4652 && GET_MODE_CLASS (inner_mode) == MODE_INT
4653 && (GET_MODE_BITSIZE (inner_mode)
4654 <= HOST_BITS_PER_WIDE_INT)
4655 && (STORE_FLAG_VALUE
4656 & ((HOST_WIDE_INT) 1
4657 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4658 #ifdef FLOAT_STORE_FLAG_VALUE
4659 || (code == LT
4660 && SCALAR_FLOAT_MODE_P (inner_mode)
4661 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4662 REAL_VALUE_NEGATIVE (fsfv)))
4663 #endif
4664 ))
4665 && COMPARISON_P (SET_SRC (set))))
4666 && (((GET_MODE_CLASS (mode) == MODE_CC)
4667 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4668 || mode == VOIDmode || inner_mode == VOIDmode))
4669 x = SET_SRC (set);
4670 else if (((code == EQ
4671 || (code == GE
4672 && (GET_MODE_BITSIZE (inner_mode)
4673 <= HOST_BITS_PER_WIDE_INT)
4674 && GET_MODE_CLASS (inner_mode) == MODE_INT
4675 && (STORE_FLAG_VALUE
4676 & ((HOST_WIDE_INT) 1
4677 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4678 #ifdef FLOAT_STORE_FLAG_VALUE
4679 || (code == GE
4680 && SCALAR_FLOAT_MODE_P (inner_mode)
4681 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4682 REAL_VALUE_NEGATIVE (fsfv)))
4683 #endif
4684 ))
4685 && COMPARISON_P (SET_SRC (set))
4686 && (((GET_MODE_CLASS (mode) == MODE_CC)
4687 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4688 || mode == VOIDmode || inner_mode == VOIDmode))
4689
4691 {
4692 x = SET_SRC (set);
4693 }
4694 else
4695 break;
4696 }
4697
4698 else if (reg_set_p (op0, prev))
4699 /* If this sets OP0, but not directly, we have to give up. */
4700 break;
4701
4702 if (x)
4703 {
4704 /* If the caller is expecting the condition to be valid at INSN,
4705 make sure X doesn't change before INSN. */
4706 if (valid_at_insn_p)
4707 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
4708 break;
4709 if (COMPARISON_P (x))
4710 code = GET_CODE (x);
4711 if (reverse_code)
4712 {
4713 code = reversed_comparison_code (x, prev);
4714 if (code == UNKNOWN)
4715 return 0;
4716 reverse_code = 0;
4717 }
4718
4719 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
4720 if (earliest)
4721 *earliest = prev;
4722 }
4723 }
4724
4725 /* If constant is first, put it last. */
4726 if (CONSTANT_P (op0))
4727 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
4728
4729 /* If OP0 is the result of a comparison, we weren't able to find what
4730 was really being compared, so fail. */
4731 if (!allow_cc_mode
4732 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4733 return 0;
4734
4735 /* Canonicalize any ordered comparison that includes equality (LE, GE,
4736 LEU, GEU) against an integer constant, provided we can do the
4737 computation in the relevant mode without overflow. */
4738
4739 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
4740 && GET_CODE (op1) == CONST_INT
4741 && GET_MODE (op0) != VOIDmode
4742 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
4743 {
4744 HOST_WIDE_INT const_val = INTVAL (op1);
4745 unsigned HOST_WIDE_INT uconst_val = const_val;
4746 unsigned HOST_WIDE_INT max_val
4747 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
4748
4749 switch (code)
4750 {
4751 case LE:
4752 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
4753 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
4754 break;
4755
4756 /* When cross-compiling, const_val might be sign-extended from
4757 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
4758 case GE:
4759 if ((HOST_WIDE_INT) (const_val & max_val)
4760 != (((HOST_WIDE_INT) 1
4761 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
4762 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
4763 break;
4764
4765 case LEU:
4766 if (uconst_val < max_val)
4767 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
4768 break;
4769
4770 case GEU:
4771 if (uconst_val != 0)
4772 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
4773 break;
4774
4775 default:
4776 break;
4777 }
4778 }
4779
4780 /* Never return CC0; return zero instead. */
4781 if (CC0_P (op0))
4782 return 0;
4783
4784 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4785 }
4786
4787 /* Given a jump insn JUMP, return the condition that will cause it to branch
4788 to its JUMP_LABEL. If the condition cannot be understood, or is an
4789 inequality floating-point comparison which needs to be reversed, 0 will
4790 be returned.
4791
4792 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4793 insn used in locating the condition was found. If a replacement test
4794 of the condition is desired, it should be placed in front of that
4795 insn and we will be sure that the inputs are still valid. If EARLIEST
4796 is null, the returned condition will be valid at INSN.
4797
4798 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
4799 compare CC mode register.
4800
4801 VALID_AT_INSN_P is the same as for canonicalize_condition. */
4802
4803 rtx
4804 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
4805 {
4806 rtx cond;
4807 int reverse;
4808 rtx set;
4809
4810 /* If this is not a standard conditional jump, we can't parse it. */
4811 if (!JUMP_P (jump)
4812 || ! any_condjump_p (jump))
4813 return 0;
4814 set = pc_set (jump);
4815
4816 cond = XEXP (SET_SRC (set), 0);
4817
4818 /* If this branches to JUMP_LABEL when the condition is false, reverse
4819 the condition. */
4820 reverse
4821 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4822 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
4823
4824 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
4825 allow_cc_mode, valid_at_insn_p);
4826 }
4827
4828 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
4829 TARGET_MODE_REP_EXTENDED.
4830
4831 Note that we assume that the property of
4832 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
4833 narrower than mode B. I.e., if A is a mode narrower than B then in
4834 order to be able to operate on it in mode B, mode A needs to
4835 satisfy the requirements set by the representation of mode B. */
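/* E.g., hypothetically, if a target defined
TARGET_MODE_REP_EXTENDED (SImode, DImode) == SIGN_EXTEND, this loop
would set num_sign_bit_copies_in_rep[DImode][SImode] to 32: an
SImode value held in a DImode register must keep its upper 32 bits
equal to the sign bit. */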
4836
4837 static void
4838 init_num_sign_bit_copies_in_rep (void)
4839 {
4840 enum machine_mode mode, in_mode;
4841
4842 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
4843 in_mode = GET_MODE_WIDER_MODE (mode))
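/* Note: the outer loop's increment uses MODE, not IN_MODE; this is
correct because the inner loop below always terminates with
mode == in_mode, so GET_MODE_WIDER_MODE (mode) advances IN_MODE to
the next wider mode. */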
4844 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
4845 mode = GET_MODE_WIDER_MODE (mode))
4846 {
4847 enum machine_mode i;
4848
4849 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
4850 extends to the next widest mode. */
4851 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
4852 || GET_MODE_WIDER_MODE (mode) == in_mode);
4853
4854 /* We are in in_mode. Count how many bits outside of mode
4855 have to be copies of the sign-bit. */
4856 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
4857 {
4858 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
4859
4860 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
4861 /* We can only check sign-bit copies starting from the
4862 top-bit. In order to be able to check the bits we
4863 have already seen we pretend that subsequent bits
4864 have to be sign-bit copies too. */
4865 || num_sign_bit_copies_in_rep [in_mode][mode])
4866 num_sign_bit_copies_in_rep [in_mode][mode]
4867 += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
4868 }
4869 }
4870 }
4871
4872 /* Suppose that truncation from the machine mode of X to MODE is not a
4873 no-op. See if there is anything special about X so that we can
4874 assume it already contains a truncated value of MODE. */
4875
4876 bool
4877 truncated_to_mode (enum machine_mode mode, const_rtx x)
4878 {
4879 /* This register has already been used in MODE without explicit
4880 truncation. */
4881 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
4882 return true;
4883
4884 /* See if we already satisfy the requirements of MODE. If yes we
4885 can just switch to MODE. */
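/* The "+ 1" below accounts for the sign bit of MODE itself: the value
must supply that bit plus all the higher bits that the representation
requires to be copies of it. */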
4886 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
4887 && (num_sign_bit_copies (x, GET_MODE (x))
4888 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
4889 return true;
4890
4891 return false;
4892 }
4893 \f
4894 /* Initialize non_rtx_starting_operands, which is used to speed up
4895 for_each_rtx. */
4896 void
4897 init_rtlanal (void)
4898 {
4899 int i;
4900 for (i = 0; i < NUM_RTX_CODE; i++)
4901 {
4902 const char *format = GET_RTX_FORMAT (i);
4903 const char *first = strpbrk (format, "eEV");
4904 non_rtx_starting_operands[i] = first ? first - format : -1;
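/* E.g. PLUS has format "ee", giving offset 0, while CONST_INT's
format "w" contains no 'e', 'E' or 'V' and gets -1. */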
4905 }
4906
4907 init_num_sign_bit_copies_in_rep ();
4908 }
4909 \f
4910 /* Check whether this is a constant pool constant. */
4911 bool
4912 constant_pool_constant_p (rtx x)
4913 {
4914 x = avoid_constant_pool_reference (x);
4915 return GET_CODE (x) == CONST_DOUBLE;
4916 }
4917