1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software
4 Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "target.h"
34 #include "output.h"
35 #include "tm_p.h"
36 #include "flags.h"
37 #include "real.h"
38 #include "regs.h"
39 #include "function.h"
40
41 /* Forward declarations */
42 static void set_of_1 (rtx, rtx, void *);
43 static bool covers_regno_p (rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (rtx, unsigned int);
45 static int rtx_referenced_p_1 (rtx *, void *);
46 static int computed_jump_p_1 (rtx);
47 static void parms_set (rtx, rtx, void *);
48
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode,
50 rtx, enum machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx,
53 enum machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
56 enum machine_mode,
57 unsigned int);
58 static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
59 enum machine_mode, unsigned int);
60
61 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
62 -1 if a code has no such operand. */
63 static int non_rtx_starting_operands[NUM_RTX_CODE];
64
65 /* Bit flags that specify the machine subtype we are compiling for.
66 Bits are tested using macros TARGET_... defined in the tm.h file
67 and set by `-m...' switches. Must be defined in rtlanal.c. */
68
69 int target_flags;
70
71 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
72 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
73 SIGN_EXTEND then while narrowing we also have to enforce the
74 representation and sign-extend the value to mode DESTINATION_REP.
75
76 If the value is already sign-extended to DESTINATION_REP mode we
77 can just switch to DESTINATION mode on it. For each pair of
78 integral modes SOURCE and DESTINATION, when truncating from SOURCE
79 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
80 contains the number of high-order bits in SOURCE that have to be
81 copies of the sign-bit so that we can do this mode-switch to
82 DESTINATION. */
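/* For example, with SOURCE SImode and DESTINATION HImode on a target
   whose TARGET_MODE_REP_EXTENDED (HImode, SImode) is SIGN_EXTEND, the
   value can simply be relabelled as HImode only if every bit above the
   low-order 16 is already a copy of bit 15; the table entry records how
   many such high-order sign-bit copies are required. */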
83
84 static unsigned int
85 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
86 \f
87 /* Return 1 if the value of X is unstable
88 (would be different at a different point in the program).
89 The frame pointer, arg pointer, etc. are considered stable
90 (within one function) and so is anything marked `unchanging'. */
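/* For example, an address such as (plus frame_pointer_rtx (const_int 8))
   is stable within one function, whereas a MEM that is not MEM_READONLY_P
   is unstable because its contents may differ between two points in the
   program. */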
91
92 int
93 rtx_unstable_p (rtx x)
94 {
95 RTX_CODE code = GET_CODE (x);
96 int i;
97 const char *fmt;
98
99 switch (code)
100 {
101 case MEM:
102 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
103
104 case CONST:
105 case CONST_INT:
106 case CONST_DOUBLE:
107 case CONST_VECTOR:
108 case SYMBOL_REF:
109 case LABEL_REF:
110 return 0;
111
112 case REG:
113 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
114 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
115 /* The arg pointer varies if it is not a fixed register. */
116 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
117 return 0;
118 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
119 /* ??? When call-clobbered, the value is stable modulo the restore
120 that must happen after a call. This currently screws up local-alloc
121 into believing that the restore is not needed. */
122 if (x == pic_offset_table_rtx)
123 return 0;
124 #endif
125 return 1;
126
127 case ASM_OPERANDS:
128 if (MEM_VOLATILE_P (x))
129 return 1;
130
131 /* Fall through. */
132
133 default:
134 break;
135 }
136
137 fmt = GET_RTX_FORMAT (code);
138 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
139 if (fmt[i] == 'e')
140 {
141 if (rtx_unstable_p (XEXP (x, i)))
142 return 1;
143 }
144 else if (fmt[i] == 'E')
145 {
146 int j;
147 for (j = 0; j < XVECLEN (x, i); j++)
148 if (rtx_unstable_p (XVECEXP (x, i, j)))
149 return 1;
150 }
151
152 return 0;
153 }
154
155 /* Return 1 if X has a value that can vary even between two
156 executions of the program. 0 means X can be compared reliably
157 against certain constants or near-constants.
158 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
159 zero, we are slightly more conservative.
160 The frame pointer and the arg pointer are considered constant. */
161
162 int
163 rtx_varies_p (rtx x, int for_alias)
164 {
165 RTX_CODE code;
166 int i;
167 const char *fmt;
168
169 if (!x)
170 return 0;
171
172 code = GET_CODE (x);
173 switch (code)
174 {
175 case MEM:
176 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
177
178 case CONST:
179 case CONST_INT:
180 case CONST_DOUBLE:
181 case CONST_VECTOR:
182 case SYMBOL_REF:
183 case LABEL_REF:
184 return 0;
185
186 case REG:
187 /* Note that we have to test for the actual rtx used for the frame
188 and arg pointers and not just the register number in case we have
189 eliminated the frame and/or arg pointer and are using it
190 for pseudos. */
191 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
192 /* The arg pointer varies if it is not a fixed register. */
193 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
194 return 0;
195 if (x == pic_offset_table_rtx
196 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
197 /* ??? When call-clobbered, the value is stable modulo the restore
198 that must happen after a call. This currently screws up
199 local-alloc into believing that the restore is not needed, so we
200 must return 0 only if we are called from alias analysis. */
201 && for_alias
202 #endif
203 )
204 return 0;
205 return 1;
206
207 case LO_SUM:
208 /* The operand 0 of a LO_SUM is considered constant
209 (in fact it is related specifically to operand 1)
210 during alias analysis. */
211 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
212 || rtx_varies_p (XEXP (x, 1), for_alias);
213
214 case ASM_OPERANDS:
215 if (MEM_VOLATILE_P (x))
216 return 1;
217
218 /* Fall through. */
219
220 default:
221 break;
222 }
223
224 fmt = GET_RTX_FORMAT (code);
225 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
226 if (fmt[i] == 'e')
227 {
228 if (rtx_varies_p (XEXP (x, i), for_alias))
229 return 1;
230 }
231 else if (fmt[i] == 'E')
232 {
233 int j;
234 for (j = 0; j < XVECLEN (x, i); j++)
235 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
236 return 1;
237 }
238
239 return 0;
240 }
241
242 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
243 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
244 whether nonzero is returned for unaligned memory accesses on strict
245 alignment machines. */
246
247 static int
248 rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems)
249 {
250 enum rtx_code code = GET_CODE (x);
251
252 switch (code)
253 {
254 case SYMBOL_REF:
255 return SYMBOL_REF_WEAK (x);
256
257 case LABEL_REF:
258 return 0;
259
260 case REG:
261 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
262 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
263 || x == stack_pointer_rtx
264 /* The arg pointer varies if it is not a fixed register. */
265 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
266 return 0;
267 /* All of the virtual frame registers are stack references. */
268 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
269 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
270 return 0;
271 return 1;
272
273 case CONST:
274 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
275
276 case PLUS:
277 /* An address is assumed not to trap if:
278 - it is an address that can't trap plus a constant integer,
279 with the proper remainder modulo the mode size if we are
280 considering unaligned memory references. */
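	 /* For example, on a strict-alignment machine with UNALIGNED_MEMS
	    set, an SImode access at (plus stack_pointer_rtx (const_int 2))
	    gives offset % GET_MODE_SIZE (mode) == 2, so nonzero is
	    returned. */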
281 if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
282 && GET_CODE (XEXP (x, 1)) == CONST_INT)
283 {
284 HOST_WIDE_INT offset;
285
286 if (!STRICT_ALIGNMENT
287 || !unaligned_mems
288 || GET_MODE_SIZE (mode) == 0)
289 return 0;
290
291 offset = INTVAL (XEXP (x, 1));
292
293 #ifdef SPARC_STACK_BOUNDARY_HACK
294 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
295 the real alignment of %sp. However, when it does this, the
296 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
297 if (SPARC_STACK_BOUNDARY_HACK
298 && (XEXP (x, 0) == stack_pointer_rtx
299 || XEXP (x, 0) == hard_frame_pointer_rtx))
300 offset -= STACK_POINTER_OFFSET;
301 #endif
302
303 return offset % GET_MODE_SIZE (mode) != 0;
304 }
305
306 /* - or it is the pic register plus a constant. */
307 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
308 return 0;
309
310 return 1;
311
312 case LO_SUM:
313 case PRE_MODIFY:
314 return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);
315
316 case PRE_DEC:
317 case PRE_INC:
318 case POST_DEC:
319 case POST_INC:
320 case POST_MODIFY:
321 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
322
323 default:
324 break;
325 }
326
327 /* If it isn't one of the cases above, it can cause a trap. */
328 return 1;
329 }
330
331 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
332
333 int
334 rtx_addr_can_trap_p (rtx x)
335 {
336 return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
337 }
338
339 /* Return true if X is an address that is known to not be zero. */
340
341 bool
342 nonzero_address_p (rtx x)
343 {
344 enum rtx_code code = GET_CODE (x);
345
346 switch (code)
347 {
348 case SYMBOL_REF:
349 return !SYMBOL_REF_WEAK (x);
350
351 case LABEL_REF:
352 return true;
353
354 case REG:
355 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
356 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
357 || x == stack_pointer_rtx
358 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
359 return true;
360 /* All of the virtual frame registers are stack references. */
361 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
362 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
363 return true;
364 return false;
365
366 case CONST:
367 return nonzero_address_p (XEXP (x, 0));
368
369 case PLUS:
370 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
371 return nonzero_address_p (XEXP (x, 0));
372 /* Handle PIC references. */
373 else if (XEXP (x, 0) == pic_offset_table_rtx
374 && CONSTANT_P (XEXP (x, 1)))
375 return true;
376 return false;
377
378 case PRE_MODIFY:
379 /* Similar to the above; allow positive offsets. Further, since
380 auto-inc is only allowed in memories, the register must be a
381 pointer. */
382 if (GET_CODE (XEXP (x, 1)) == CONST_INT
383 && INTVAL (XEXP (x, 1)) > 0)
384 return true;
385 return nonzero_address_p (XEXP (x, 0));
386
387 case PRE_INC:
388 /* Similarly. Further, the offset is always positive. */
389 return true;
390
391 case PRE_DEC:
392 case POST_DEC:
393 case POST_INC:
394 case POST_MODIFY:
395 return nonzero_address_p (XEXP (x, 0));
396
397 case LO_SUM:
398 return nonzero_address_p (XEXP (x, 1));
399
400 default:
401 break;
402 }
403
404 /* If it isn't one of the cases above, it might be zero. */
405 return false;
406 }
407
408 /* Return 1 if X refers to a memory location whose address
409 cannot be compared reliably with constant addresses,
410 or if X refers to a BLKmode memory object.
411 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
412 zero, we are slightly more conservative. */
413
414 int
415 rtx_addr_varies_p (rtx x, int for_alias)
416 {
417 enum rtx_code code;
418 int i;
419 const char *fmt;
420
421 if (x == 0)
422 return 0;
423
424 code = GET_CODE (x);
425 if (code == MEM)
426 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
427
428 fmt = GET_RTX_FORMAT (code);
429 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
430 if (fmt[i] == 'e')
431 {
432 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
433 return 1;
434 }
435 else if (fmt[i] == 'E')
436 {
437 int j;
438 for (j = 0; j < XVECLEN (x, i); j++)
439 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
440 return 1;
441 }
442 return 0;
443 }
444 \f
445 /* Return the value of the integer term in X, if one is apparent;
446 otherwise return 0.
447 Only obvious integer terms are detected.
448 This is used in cse.c with the `related_value' field. */
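/* For example, for (const (plus (symbol_ref "x") (const_int 12))) the
   result is 12, and for (const (minus (symbol_ref "x") (const_int 4)))
   the result is -4. */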
449
450 HOST_WIDE_INT
451 get_integer_term (rtx x)
452 {
453 if (GET_CODE (x) == CONST)
454 x = XEXP (x, 0);
455
456 if (GET_CODE (x) == MINUS
457 && GET_CODE (XEXP (x, 1)) == CONST_INT)
458 return - INTVAL (XEXP (x, 1));
459 if (GET_CODE (x) == PLUS
460 && GET_CODE (XEXP (x, 1)) == CONST_INT)
461 return INTVAL (XEXP (x, 1));
462 return 0;
463 }
464
465 /* If X is a constant, return the value sans apparent integer term;
466 otherwise return 0.
467 Only obvious integer terms are detected. */
468
469 rtx
470 get_related_value (rtx x)
471 {
472 if (GET_CODE (x) != CONST)
473 return 0;
474 x = XEXP (x, 0);
475 if (GET_CODE (x) == PLUS
476 && GET_CODE (XEXP (x, 1)) == CONST_INT)
477 return XEXP (x, 0);
478 else if (GET_CODE (x) == MINUS
479 && GET_CODE (XEXP (x, 1)) == CONST_INT)
480 return XEXP (x, 0);
481 return 0;
482 }
483 \f
484 /* Return the number of places FIND appears within X. If COUNT_DEST is
485 zero, we do not count occurrences inside the destination of a SET. */
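/* For example, given the pattern (set (reg 1) (plus (reg 1) (reg 2)))
   and FIND being the shared (reg 1) rtx, the result is 1 when COUNT_DEST
   is zero (only the source is scanned) and 2 otherwise. */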
486
487 int
488 count_occurrences (rtx x, rtx find, int count_dest)
489 {
490 int i, j;
491 enum rtx_code code;
492 const char *format_ptr;
493 int count;
494
495 if (x == find)
496 return 1;
497
498 code = GET_CODE (x);
499
500 switch (code)
501 {
502 case REG:
503 case CONST_INT:
504 case CONST_DOUBLE:
505 case CONST_VECTOR:
506 case SYMBOL_REF:
507 case CODE_LABEL:
508 case PC:
509 case CC0:
510 return 0;
511
512 case EXPR_LIST:
513 count = count_occurrences (XEXP (x, 0), find, count_dest);
514 if (XEXP (x, 1))
515 count += count_occurrences (XEXP (x, 1), find, count_dest);
516 return count;
517
518 case MEM:
519 if (MEM_P (find) && rtx_equal_p (x, find))
520 return 1;
521 break;
522
523 case SET:
524 if (SET_DEST (x) == find && ! count_dest)
525 return count_occurrences (SET_SRC (x), find, count_dest);
526 break;
527
528 default:
529 break;
530 }
531
532 format_ptr = GET_RTX_FORMAT (code);
533 count = 0;
534
535 for (i = 0; i < GET_RTX_LENGTH (code); i++)
536 {
537 switch (*format_ptr++)
538 {
539 case 'e':
540 count += count_occurrences (XEXP (x, i), find, count_dest);
541 break;
542
543 case 'E':
544 for (j = 0; j < XVECLEN (x, i); j++)
545 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
546 break;
547 }
548 }
549 return count;
550 }
551 \f
552 /* Nonzero if register REG appears somewhere within IN.
553 Also works if REG is not a register; in this case it checks
554 for a subexpression of IN that is Lisp "equal" to REG. */
555
556 int
557 reg_mentioned_p (rtx reg, rtx in)
558 {
559 const char *fmt;
560 int i;
561 enum rtx_code code;
562
563 if (in == 0)
564 return 0;
565
566 if (reg == in)
567 return 1;
568
569 if (GET_CODE (in) == LABEL_REF)
570 return reg == XEXP (in, 0);
571
572 code = GET_CODE (in);
573
574 switch (code)
575 {
576 /* Compare registers by number. */
577 case REG:
578 return REG_P (reg) && REGNO (in) == REGNO (reg);
579
580 /* These codes have no constituent expressions
581 and are unique. */
582 case SCRATCH:
583 case CC0:
584 case PC:
585 return 0;
586
587 case CONST_INT:
588 case CONST_VECTOR:
589 case CONST_DOUBLE:
590 /* These are kept unique for a given value. */
591 return 0;
592
593 default:
594 break;
595 }
596
597 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
598 return 1;
599
600 fmt = GET_RTX_FORMAT (code);
601
602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
603 {
604 if (fmt[i] == 'E')
605 {
606 int j;
607 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
608 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
609 return 1;
610 }
611 else if (fmt[i] == 'e'
612 && reg_mentioned_p (reg, XEXP (in, i)))
613 return 1;
614 }
615 return 0;
616 }
617 \f
618 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
619 no CODE_LABEL insn. */
620
621 int
622 no_labels_between_p (rtx beg, rtx end)
623 {
624 rtx p;
625 if (beg == end)
626 return 0;
627 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
628 if (LABEL_P (p))
629 return 0;
630 return 1;
631 }
632
633 /* Nonzero if register REG is used in an insn between
634 FROM_INSN and TO_INSN (exclusive of those two). */
635
636 int
637 reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn)
638 {
639 rtx insn;
640
641 if (from_insn == to_insn)
642 return 0;
643
644 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
645 if (INSN_P (insn)
646 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
647 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
648 return 1;
649 return 0;
650 }
651 \f
652 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
653 is entirely replaced by a new value and the only use is as a SET_DEST,
654 we do not consider it a reference. */
655
656 int
657 reg_referenced_p (rtx x, rtx body)
658 {
659 int i;
660
661 switch (GET_CODE (body))
662 {
663 case SET:
664 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
665 return 1;
666
667 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
668 of a REG that occupies all of the REG, the insn references X if
669 it is mentioned in the destination. */
670 if (GET_CODE (SET_DEST (body)) != CC0
671 && GET_CODE (SET_DEST (body)) != PC
672 && !REG_P (SET_DEST (body))
673 && ! (GET_CODE (SET_DEST (body)) == SUBREG
674 && REG_P (SUBREG_REG (SET_DEST (body)))
675 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
676 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
677 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
678 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
679 && reg_overlap_mentioned_p (x, SET_DEST (body)))
680 return 1;
681 return 0;
682
683 case ASM_OPERANDS:
684 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
685 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
686 return 1;
687 return 0;
688
689 case CALL:
690 case USE:
691 case IF_THEN_ELSE:
692 return reg_overlap_mentioned_p (x, body);
693
694 case TRAP_IF:
695 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
696
697 case PREFETCH:
698 return reg_overlap_mentioned_p (x, XEXP (body, 0));
699
700 case UNSPEC:
701 case UNSPEC_VOLATILE:
702 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
703 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
704 return 1;
705 return 0;
706
707 case PARALLEL:
708 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
709 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
710 return 1;
711 return 0;
712
713 case CLOBBER:
714 if (MEM_P (XEXP (body, 0)))
715 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
716 return 1;
717 return 0;
718
719 case COND_EXEC:
720 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
721 return 1;
722 return reg_referenced_p (x, COND_EXEC_CODE (body));
723
724 default:
725 return 0;
726 }
727 }
728 \f
729 /* Nonzero if register REG is set or clobbered in an insn between
730 FROM_INSN and TO_INSN (exclusive of those two). */
731
732 int
733 reg_set_between_p (rtx reg, rtx from_insn, rtx to_insn)
734 {
735 rtx insn;
736
737 if (from_insn == to_insn)
738 return 0;
739
740 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
741 if (INSN_P (insn) && reg_set_p (reg, insn))
742 return 1;
743 return 0;
744 }
745
746 /* Internals of reg_set_between_p: nonzero if REG is set or clobbered in INSN. */
747 int
748 reg_set_p (rtx reg, rtx insn)
749 {
750 /* We can be passed an insn or part of one. If we are passed an insn,
751 check if a side-effect of the insn clobbers REG. */
752 if (INSN_P (insn)
753 && (FIND_REG_INC_NOTE (insn, reg)
754 || (CALL_P (insn)
755 && ((REG_P (reg)
756 && REGNO (reg) < FIRST_PSEUDO_REGISTER
757 && TEST_HARD_REG_BIT (regs_invalidated_by_call,
758 REGNO (reg)))
759 || MEM_P (reg)
760 || find_reg_fusage (insn, CLOBBER, reg)))))
761 return 1;
762
763 return set_of (reg, insn) != NULL_RTX;
764 }
765
766 /* Similar to reg_set_between_p, but check all registers in X. Return 0
767 only if none of them are modified between START and END. Return 1 if
768 X contains a MEM; this routine does use memory aliasing. */
769
770 int
771 modified_between_p (rtx x, rtx start, rtx end)
772 {
773 enum rtx_code code = GET_CODE (x);
774 const char *fmt;
775 int i, j;
776 rtx insn;
777
778 if (start == end)
779 return 0;
780
781 switch (code)
782 {
783 case CONST_INT:
784 case CONST_DOUBLE:
785 case CONST_VECTOR:
786 case CONST:
787 case SYMBOL_REF:
788 case LABEL_REF:
789 return 0;
790
791 case PC:
792 case CC0:
793 return 1;
794
795 case MEM:
796 if (modified_between_p (XEXP (x, 0), start, end))
797 return 1;
798 if (MEM_READONLY_P (x))
799 return 0;
800 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
801 if (memory_modified_in_insn_p (x, insn))
802 return 1;
803 return 0;
804 break;
805
806 case REG:
807 return reg_set_between_p (x, start, end);
808
809 default:
810 break;
811 }
812
813 fmt = GET_RTX_FORMAT (code);
814 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
815 {
816 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
817 return 1;
818
819 else if (fmt[i] == 'E')
820 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
821 if (modified_between_p (XVECEXP (x, i, j), start, end))
822 return 1;
823 }
824
825 return 0;
826 }
827
828 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
829 of them are modified in INSN. Return 1 if X contains a MEM; this routine
830 does use memory aliasing. */
831
832 int
833 modified_in_p (rtx x, rtx insn)
834 {
835 enum rtx_code code = GET_CODE (x);
836 const char *fmt;
837 int i, j;
838
839 switch (code)
840 {
841 case CONST_INT:
842 case CONST_DOUBLE:
843 case CONST_VECTOR:
844 case CONST:
845 case SYMBOL_REF:
846 case LABEL_REF:
847 return 0;
848
849 case PC:
850 case CC0:
851 return 1;
852
853 case MEM:
854 if (modified_in_p (XEXP (x, 0), insn))
855 return 1;
856 if (MEM_READONLY_P (x))
857 return 0;
858 if (memory_modified_in_insn_p (x, insn))
859 return 1;
860 return 0;
861 break;
862
863 case REG:
864 return reg_set_p (x, insn);
865
866 default:
867 break;
868 }
869
870 fmt = GET_RTX_FORMAT (code);
871 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
872 {
873 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
874 return 1;
875
876 else if (fmt[i] == 'E')
877 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
878 if (modified_in_p (XVECEXP (x, i, j), insn))
879 return 1;
880 }
881
882 return 0;
883 }
884 \f
885 /* Helper function for set_of. */
886 struct set_of_data
887 {
888 rtx found;
889 rtx pat;
890 };
891
892 static void
893 set_of_1 (rtx x, rtx pat, void *data1)
894 {
895 struct set_of_data *data = (struct set_of_data *) (data1);
896 if (rtx_equal_p (x, data->pat)
897 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
898 data->found = pat;
899 }
900
901 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
902 (either directly or via STRICT_LOW_PART and similar modifiers). */
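/* For example, for an insn whose pattern is
   (parallel [(set (reg 1) (reg 2)) (clobber (reg 3))]),
   set_of (reg 3, insn) returns the CLOBBER expression, while
   set_of (reg 2, insn) returns NULL_RTX since (reg 2) is only used. */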
903 rtx
904 set_of (rtx pat, rtx insn)
905 {
906 struct set_of_data data;
907 data.found = NULL_RTX;
908 data.pat = pat;
909 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
910 return data.found;
911 }
912 \f
913 /* Given an INSN, return a SET expression if this insn has only a single SET.
914 It may also have CLOBBERs, USEs, or SETs whose outputs
915 will not be used, which we ignore. */
916
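/* For example, (parallel [(set (reg 1) (reg 2)) (clobber (scratch))])
   yields the SET of (reg 1), whereas a PARALLEL containing two SETs,
   neither of whose destinations has a REG_UNUSED note, yields NULL_RTX. */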
917 rtx
918 single_set_2 (rtx insn, rtx pat)
919 {
920 rtx set = NULL;
921 int set_verified = 1;
922 int i;
923
924 if (GET_CODE (pat) == PARALLEL)
925 {
926 for (i = 0; i < XVECLEN (pat, 0); i++)
927 {
928 rtx sub = XVECEXP (pat, 0, i);
929 switch (GET_CODE (sub))
930 {
931 case USE:
932 case CLOBBER:
933 break;
934
935 case SET:
936 /* We can consider insns having multiple sets, where all
937 but one are dead, as single set insns. In the common case
938 only a single set is present in the pattern, so we want
939 to avoid checking for REG_UNUSED notes unless necessary.
940
941 When we reach a set the first time, we just expect it is
942 the single set we are looking for; only when more sets
943 are found in the insn do we check them. */
944 if (!set_verified)
945 {
946 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
947 && !side_effects_p (set))
948 set = NULL;
949 else
950 set_verified = 1;
951 }
952 if (!set)
953 set = sub, set_verified = 0;
954 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
955 || side_effects_p (sub))
956 return NULL_RTX;
957 break;
958
959 default:
960 return NULL_RTX;
961 }
962 }
963 }
964 return set;
965 }
966
967 /* Given an INSN, return nonzero if it has more than one SET, else return
968 zero. */
969
970 int
971 multiple_sets (rtx insn)
972 {
973 int found;
974 int i;
975
976 /* INSN must be an insn. */
977 if (! INSN_P (insn))
978 return 0;
979
980 /* Only a PARALLEL can have multiple SETs. */
981 if (GET_CODE (PATTERN (insn)) == PARALLEL)
982 {
983 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
984 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
985 {
986 /* If we have already found a SET, then return now. */
987 if (found)
988 return 1;
989 else
990 found = 1;
991 }
992 }
993
994 /* Either zero or one SET. */
995 return 0;
996 }
997 \f
998 /* Return nonzero if the destination of SET equals the source
999 and there are no side effects. */
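/* For example, (set (reg 1) (reg 1)) is a no-op, as is
   (set (subreg:SI (reg:DI 1) 0) (subreg:SI (reg:DI 1) 0)), but
   (set (mem (post_inc (reg 2))) (mem (post_inc (reg 2)))) is not,
   because of the auto-increment side effects. */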
1000
1001 int
1002 set_noop_p (rtx set)
1003 {
1004 rtx src = SET_SRC (set);
1005 rtx dst = SET_DEST (set);
1006
1007 if (dst == pc_rtx && src == pc_rtx)
1008 return 1;
1009
1010 if (MEM_P (dst) && MEM_P (src))
1011 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1012
1013 if (GET_CODE (dst) == ZERO_EXTRACT)
1014 return rtx_equal_p (XEXP (dst, 0), src)
1015 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1016 && !side_effects_p (src);
1017
1018 if (GET_CODE (dst) == STRICT_LOW_PART)
1019 dst = XEXP (dst, 0);
1020
1021 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1022 {
1023 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1024 return 0;
1025 src = SUBREG_REG (src);
1026 dst = SUBREG_REG (dst);
1027 }
1028
1029 return (REG_P (src) && REG_P (dst)
1030 && REGNO (src) == REGNO (dst));
1031 }
1032 \f
1033 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1034 value to itself. */
1035
1036 int
1037 noop_move_p (rtx insn)
1038 {
1039 rtx pat = PATTERN (insn);
1040
1041 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1042 return 1;
1043
1044 /* Insns carrying these notes are useful later on. */
1045 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1046 return 0;
1047
1048 /* For now treat an insn with a REG_RETVAL note as a
1049 special insn which should not be considered a no-op. */
1050 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
1051 return 0;
1052
1053 if (GET_CODE (pat) == SET && set_noop_p (pat))
1054 return 1;
1055
1056 if (GET_CODE (pat) == PARALLEL)
1057 {
1058 int i;
1059 /* If nothing but SETs of registers to themselves,
1060 this insn can also be deleted. */
1061 for (i = 0; i < XVECLEN (pat, 0); i++)
1062 {
1063 rtx tem = XVECEXP (pat, 0, i);
1064
1065 if (GET_CODE (tem) == USE
1066 || GET_CODE (tem) == CLOBBER)
1067 continue;
1068
1069 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1070 return 0;
1071 }
1072
1073 return 1;
1074 }
1075 return 0;
1076 }
1077 \f
1078
1079 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1080 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1081 If the object was modified, if we hit a partial assignment to X, or hit a
1082 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1083 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1084 be the src. */
1085
1086 rtx
1087 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1088 {
1089 rtx p;
1090
1091 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1092 p = PREV_INSN (p))
1093 if (INSN_P (p))
1094 {
1095 rtx set = single_set (p);
1096 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1097
1098 if (set && rtx_equal_p (x, SET_DEST (set)))
1099 {
1100 rtx src = SET_SRC (set);
1101
1102 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1103 src = XEXP (note, 0);
1104
1105 if ((valid_to == NULL_RTX
1106 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1107 /* Reject hard registers because we don't usually want
1108 to use them; we'd rather use a pseudo. */
1109 && (! (REG_P (src)
1110 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1111 {
1112 *pinsn = p;
1113 return src;
1114 }
1115 }
1116
1117 /* If set in a non-simple way, we don't have a value. */
1118 if (reg_set_p (x, p))
1119 break;
1120 }
1121
1122 return x;
1123 }
1124 \f
1125 /* Return nonzero if register in range [REGNO, ENDREGNO)
1126 appears either explicitly or implicitly in X
1127 other than being stored into.
1128
1129 References contained within the substructure at LOC do not count.
1130 LOC may be zero, meaning don't ignore anything. */
1131
1132 int
1133 refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
1134 rtx *loc)
1135 {
1136 int i;
1137 unsigned int x_regno;
1138 RTX_CODE code;
1139 const char *fmt;
1140
1141 repeat:
1142 /* The contents of a REG_NONNEG note are always zero, so we must come here
1143 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1144 if (x == 0)
1145 return 0;
1146
1147 code = GET_CODE (x);
1148
1149 switch (code)
1150 {
1151 case REG:
1152 x_regno = REGNO (x);
1153
1154 /* If we are modifying the stack, frame, or argument pointer, it will
1155 clobber a virtual register. In fact, we could be more precise,
1156 but it isn't worth it. */
1157 if ((x_regno == STACK_POINTER_REGNUM
1158 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1159 || x_regno == ARG_POINTER_REGNUM
1160 #endif
1161 || x_regno == FRAME_POINTER_REGNUM)
1162 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1163 return 1;
1164
1165 return (endregno > x_regno
1166 && regno < x_regno + (x_regno < FIRST_PSEUDO_REGISTER
1167 ? hard_regno_nregs[x_regno][GET_MODE (x)]
1168 : 1));
1169
1170 case SUBREG:
1171 /* If this is a SUBREG of a hard reg, we can see exactly which
1172 registers are being modified. Otherwise, handle normally. */
1173 if (REG_P (SUBREG_REG (x))
1174 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1175 {
1176 unsigned int inner_regno = subreg_regno (x);
1177 unsigned int inner_endregno
1178 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1179 ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1);
1180
1181 return endregno > inner_regno && regno < inner_endregno;
1182 }
1183 break;
1184
1185 case CLOBBER:
1186 case SET:
1187 if (&SET_DEST (x) != loc
1188 /* Note setting a SUBREG counts as referring to the REG it is in for
1189 a pseudo but not for hard registers since we can
1190 treat each word individually. */
1191 && ((GET_CODE (SET_DEST (x)) == SUBREG
1192 && loc != &SUBREG_REG (SET_DEST (x))
1193 && REG_P (SUBREG_REG (SET_DEST (x)))
1194 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1195 && refers_to_regno_p (regno, endregno,
1196 SUBREG_REG (SET_DEST (x)), loc))
1197 || (!REG_P (SET_DEST (x))
1198 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1199 return 1;
1200
1201 if (code == CLOBBER || loc == &SET_SRC (x))
1202 return 0;
1203 x = SET_SRC (x);
1204 goto repeat;
1205
1206 default:
1207 break;
1208 }
1209
1210 /* X does not match, so try its subexpressions. */
1211
1212 fmt = GET_RTX_FORMAT (code);
1213 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1214 {
1215 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1216 {
1217 if (i == 0)
1218 {
1219 x = XEXP (x, 0);
1220 goto repeat;
1221 }
1222 else
1223 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1224 return 1;
1225 }
1226 else if (fmt[i] == 'E')
1227 {
1228 int j;
1229 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1230 if (loc != &XVECEXP (x, i, j)
1231 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1232 return 1;
1233 }
1234 }
1235 return 0;
1236 }
1237
1238 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1239 we check if any register number in X conflicts with the relevant register
1240 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1241 contains a MEM (we don't bother checking for memory addresses that can't
1242 conflict because we expect this to be a rare case). */
1243
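/* For example, on a target where hard_regno_nregs[0][DImode] is 2,
   modifying (reg:DI 0) affects any IN that mentions (reg:SI 1), since
   the DImode value occupies hard registers 0 and 1. */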
1244 int
1245 reg_overlap_mentioned_p (rtx x, rtx in)
1246 {
1247 unsigned int regno, endregno;
1248
1249 /* If either argument is a constant, then modifying X can not
1250 affect IN. Here we test only IN; the CONSTANT_P (x) test is
1251 profitably combined with the switch statement below. */
1252 if (CONSTANT_P (in))
1253 return 0;
1254
1255 recurse:
1256 switch (GET_CODE (x))
1257 {
1258 case STRICT_LOW_PART:
1259 case ZERO_EXTRACT:
1260 case SIGN_EXTRACT:
1261 /* Overly conservative. */
1262 x = XEXP (x, 0);
1263 goto recurse;
1264
1265 case SUBREG:
1266 regno = REGNO (SUBREG_REG (x));
1267 if (regno < FIRST_PSEUDO_REGISTER)
1268 regno = subreg_regno (x);
1269 goto do_reg;
1270
1271 case REG:
1272 regno = REGNO (x);
1273 do_reg:
1274 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1275 ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
1276 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1277
1278 case MEM:
1279 {
1280 const char *fmt;
1281 int i;
1282
1283 if (MEM_P (in))
1284 return 1;
1285
1286 fmt = GET_RTX_FORMAT (GET_CODE (in));
1287 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1288 if (fmt[i] == 'e')
1289 {
1290 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1291 return 1;
1292 }
1293 else if (fmt[i] == 'E')
1294 {
1295 int j;
1296 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1297 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1298 return 1;
1299 }
1300
1301 return 0;
1302 }
1303
1304 case SCRATCH:
1305 case PC:
1306 case CC0:
1307 return reg_mentioned_p (x, in);
1308
1309 case PARALLEL:
1310 {
1311 int i;
1312
1313 /* If any register in here refers to it we return true. */
1314 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1315 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1316 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1317 return 1;
1318 return 0;
1319 }
1320
1321 default:
1322 gcc_assert (CONSTANT_P (x));
1323 return 0;
1324 }
1325 }
1326 \f
1327 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1328 (X would be the pattern of an insn).
1329 FUN receives two arguments:
1330 the REG, MEM, CC0 or PC being stored in or clobbered,
1331 the SET or CLOBBER rtx that does the store.
1332
1333 If the item being stored in or clobbered is a SUBREG of a hard register,
1334 the SUBREG will be passed. */
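/* As a minimal illustration (the callback and bitmap below are
   hypothetical, not part of this file), a caller that wants to collect
   the single-register destinations stored by an insn might write:

	static void
	record_dest (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
	{
	  if (REG_P (dest))
	    bitmap_set_bit ((bitmap) data, REGNO (dest));
	}

	...
	note_stores (PATTERN (insn), record_dest, regs_set);

   where regs_set is a bitmap allocated by the caller; multi-word hard
   registers and SUBREG destinations would need extra handling. */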
1335
1336 void
1337 note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
1338 {
1339 int i;
1340
1341 if (GET_CODE (x) == COND_EXEC)
1342 x = COND_EXEC_CODE (x);
1343
1344 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1345 {
1346 rtx dest = SET_DEST (x);
1347
1348 while ((GET_CODE (dest) == SUBREG
1349 && (!REG_P (SUBREG_REG (dest))
1350 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1351 || GET_CODE (dest) == ZERO_EXTRACT
1352 || GET_CODE (dest) == STRICT_LOW_PART)
1353 dest = XEXP (dest, 0);
1354
1355 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1356 each of whose first operand is a register. */
1357 if (GET_CODE (dest) == PARALLEL)
1358 {
1359 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1360 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1361 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1362 }
1363 else
1364 (*fun) (dest, x, data);
1365 }
1366
1367 else if (GET_CODE (x) == PARALLEL)
1368 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1369 note_stores (XVECEXP (x, 0, i), fun, data);
1370 }
1371 \f
1372 /* Like note_stores, but call FUN for each expression that is being
1373 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1374 FUN for each expression, not any interior subexpressions. FUN receives a
1375 pointer to the expression and the DATA passed to this function.
1376
1377 Note that this is not quite the same test as that done in reg_referenced_p
1378 since that considers something as being referenced if it is being
1379 partially set, while we do not. */
1380
1381 void
1382 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1383 {
1384 rtx body = *pbody;
1385 int i;
1386
1387 switch (GET_CODE (body))
1388 {
1389 case COND_EXEC:
1390 (*fun) (&COND_EXEC_TEST (body), data);
1391 note_uses (&COND_EXEC_CODE (body), fun, data);
1392 return;
1393
1394 case PARALLEL:
1395 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1396 note_uses (&XVECEXP (body, 0, i), fun, data);
1397 return;
1398
1399 case SEQUENCE:
1400 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1401 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1402 return;
1403
1404 case USE:
1405 (*fun) (&XEXP (body, 0), data);
1406 return;
1407
1408 case ASM_OPERANDS:
1409 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1410 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1411 return;
1412
1413 case TRAP_IF:
1414 (*fun) (&TRAP_CONDITION (body), data);
1415 return;
1416
1417 case PREFETCH:
1418 (*fun) (&XEXP (body, 0), data);
1419 return;
1420
1421 case UNSPEC:
1422 case UNSPEC_VOLATILE:
1423 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1424 (*fun) (&XVECEXP (body, 0, i), data);
1425 return;
1426
1427 case CLOBBER:
1428 if (MEM_P (XEXP (body, 0)))
1429 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1430 return;
1431
1432 case SET:
1433 {
1434 rtx dest = SET_DEST (body);
1435
1436 /* For sets we call FUN on everything in the source, plus registers in
1437 the memory expression of the store and the operands of a ZERO_EXTRACT. */
1438 (*fun) (&SET_SRC (body), data);
1439
1440 if (GET_CODE (dest) == ZERO_EXTRACT)
1441 {
1442 (*fun) (&XEXP (dest, 1), data);
1443 (*fun) (&XEXP (dest, 2), data);
1444 }
1445
1446 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1447 dest = XEXP (dest, 0);
1448
1449 if (MEM_P (dest))
1450 (*fun) (&XEXP (dest, 0), data);
1451 }
1452 return;
1453
1454 default:
1455 /* All the other possibilities never store. */
1456 (*fun) (pbody, data);
1457 return;
1458 }
1459 }
1460 \f
1461 /* Return nonzero if X's old contents don't survive after INSN.
1462 This will be true if X is (cc0) or if X is a register and
1463 X dies in INSN or INSN entirely sets X.
1464
1465 "Entirely set" means set directly and not through a SUBREG, or
1466 ZERO_EXTRACT, so no trace of the old contents remains.
1467 Likewise, REG_INC does not count.
1468
1469 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1470 but for this use that makes no difference, since regs don't overlap
1471 during their lifetimes. Therefore, this function may be used
1472 at any time after deaths have been computed (in flow.c).
1473
1474 If REG is a hard reg that occupies multiple machine registers, this
1475 function will only return 1 if each of those registers will be replaced
1476 by INSN. */
1477
1478 int
1479 dead_or_set_p (rtx insn, rtx x)
1480 {
1481 unsigned int regno, last_regno;
1482 unsigned int i;
1483
1484 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1485 if (GET_CODE (x) == CC0)
1486 return 1;
1487
1488 gcc_assert (REG_P (x));
1489
1490 regno = REGNO (x);
1491 last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
1492 : regno + hard_regno_nregs[regno][GET_MODE (x)] - 1);
1493
1494 for (i = regno; i <= last_regno; i++)
1495 if (! dead_or_set_regno_p (insn, i))
1496 return 0;
1497
1498 return 1;
1499 }
1500
1501 /* Return TRUE iff DEST is a register or subreg of a register and
1502 doesn't change the number of words of the inner register, and any
1503 part of the register is TEST_REGNO. */
1504
1505 static bool
1506 covers_regno_no_parallel_p (rtx dest, unsigned int test_regno)
1507 {
1508 unsigned int regno, endregno;
1509
1510 if (GET_CODE (dest) == SUBREG
1511 && (((GET_MODE_SIZE (GET_MODE (dest))
1512 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1513 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1514 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1515 dest = SUBREG_REG (dest);
1516
1517 if (!REG_P (dest))
1518 return false;
1519
1520 regno = REGNO (dest);
1521 endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
1522 : regno + hard_regno_nregs[regno][GET_MODE (dest)]);
1523 return (test_regno >= regno && test_regno < endregno);
1524 }
1525
1526 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1527 any member matches the covers_regno_no_parallel_p criteria. */
1528
1529 static bool
1530 covers_regno_p (rtx dest, unsigned int test_regno)
1531 {
1532 if (GET_CODE (dest) == PARALLEL)
1533 {
1534 /* Some targets place small structures in registers for return
1535 values of functions, and those registers are wrapped in
1536 PARALLELs that we may see as the destination of a SET. */
1537 int i;
1538
1539 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1540 {
1541 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1542 if (inner != NULL_RTX
1543 && covers_regno_no_parallel_p (inner, test_regno))
1544 return true;
1545 }
1546
1547 return false;
1548 }
1549 else
1550 return covers_regno_no_parallel_p (dest, test_regno);
1551 }
1552
1553 /* Utility function for dead_or_set_p to check an individual register. Also
1554 called from flow.c. */
1555
1556 int
1557 dead_or_set_regno_p (rtx insn, unsigned int test_regno)
1558 {
1559 rtx pattern;
1560
1561 /* See if there is a death note for something that includes TEST_REGNO. */
1562 if (find_regno_note (insn, REG_DEAD, test_regno))
1563 return 1;
1564
1565 if (CALL_P (insn)
1566 && find_regno_fusage (insn, CLOBBER, test_regno))
1567 return 1;
1568
1569 pattern = PATTERN (insn);
1570
1571 if (GET_CODE (pattern) == COND_EXEC)
1572 pattern = COND_EXEC_CODE (pattern);
1573
1574 if (GET_CODE (pattern) == SET)
1575 return covers_regno_p (SET_DEST (pattern), test_regno);
1576 else if (GET_CODE (pattern) == PARALLEL)
1577 {
1578 int i;
1579
1580 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1581 {
1582 rtx body = XVECEXP (pattern, 0, i);
1583
1584 if (GET_CODE (body) == COND_EXEC)
1585 body = COND_EXEC_CODE (body);
1586
1587 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1588 && covers_regno_p (SET_DEST (body), test_regno))
1589 return 1;
1590 }
1591 }
1592
1593 return 0;
1594 }
1595
1596 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1597 If DATUM is nonzero, look for one whose datum is DATUM. */
1598
1599 rtx
1600 find_reg_note (rtx insn, enum reg_note kind, rtx datum)
1601 {
1602 rtx link;
1603
1604 gcc_assert (insn);
1605
1606 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1607 if (! INSN_P (insn))
1608 return 0;
1609 if (datum == 0)
1610 {
1611 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1612 if (REG_NOTE_KIND (link) == kind)
1613 return link;
1614 return 0;
1615 }
1616
1617 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1618 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1619 return link;
1620 return 0;
1621 }
1622
1623 /* Return the reg-note of kind KIND in insn INSN which applies to register
1624 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1625 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1626 it might be the case that the note overlaps REGNO. */
1627
1628 rtx
1629 find_regno_note (rtx insn, enum reg_note kind, unsigned int regno)
1630 {
1631 rtx link;
1632
1633 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1634 if (! INSN_P (insn))
1635 return 0;
1636
1637 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1638 if (REG_NOTE_KIND (link) == kind
1639 /* Verify that it is a register, so that scratch and MEM won't cause a
1640 problem here. */
1641 && REG_P (XEXP (link, 0))
1642 && REGNO (XEXP (link, 0)) <= regno
1643 && ((REGNO (XEXP (link, 0))
1644 + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
1645 : hard_regno_nregs[REGNO (XEXP (link, 0))]
1646 [GET_MODE (XEXP (link, 0))]))
1647 > regno))
1648 return link;
1649 return 0;
1650 }
1651
1652 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1653 has such a note. */
1654
1655 rtx
1656 find_reg_equal_equiv_note (rtx insn)
1657 {
1658 rtx link;
1659
1660 if (!INSN_P (insn))
1661 return 0;
1662 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1663 if (REG_NOTE_KIND (link) == REG_EQUAL
1664 || REG_NOTE_KIND (link) == REG_EQUIV)
1665 {
1666 if (single_set (insn) == 0)
1667 return 0;
1668 return link;
1669 }
1670 return NULL;
1671 }
1672
1673 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1674 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
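/* For example, if the CALL_INSN_FUNCTION_USAGE list of a call contains
   (use (reg:SI 4)) and register 4 is a hard register, then
   find_reg_fusage (insn, USE, (reg:SI 4)) returns nonzero, as does
   find_regno_fusage (insn, USE, 4). */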
1675
1676 int
1677 find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)
1678 {
1679 /* If it's not a CALL_INSN, it can't possibly have a
1680 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1681 if (!CALL_P (insn))
1682 return 0;
1683
1684 gcc_assert (datum);
1685
1686 if (!REG_P (datum))
1687 {
1688 rtx link;
1689
1690 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1691 link;
1692 link = XEXP (link, 1))
1693 if (GET_CODE (XEXP (link, 0)) == code
1694 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1695 return 1;
1696 }
1697 else
1698 {
1699 unsigned int regno = REGNO (datum);
1700
1701 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1702 to pseudo registers, so don't bother checking. */
1703
1704 if (regno < FIRST_PSEUDO_REGISTER)
1705 {
1706 unsigned int end_regno
1707 = regno + hard_regno_nregs[regno][GET_MODE (datum)];
1708 unsigned int i;
1709
1710 for (i = regno; i < end_regno; i++)
1711 if (find_regno_fusage (insn, code, i))
1712 return 1;
1713 }
1714 }
1715
1716 return 0;
1717 }
1718
1719 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1720 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1721
1722 int
1723 find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)
1724 {
1725 rtx link;
1726
1727 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1728 to pseudo registers, so don't bother checking. */
1729
1730 if (regno >= FIRST_PSEUDO_REGISTER
1731 || !CALL_P (insn) )
1732 return 0;
1733
1734 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1735 {
1736 unsigned int regnote;
1737 rtx op, reg;
1738
1739 if (GET_CODE (op = XEXP (link, 0)) == code
1740 && REG_P (reg = XEXP (op, 0))
1741 && (regnote = REGNO (reg)) <= regno
1742 && regnote + hard_regno_nregs[regnote][GET_MODE (reg)] > regno)
1743 return 1;
1744 }
1745
1746 return 0;
1747 }
1748
1749 /* Return true if INSN is a call to a pure function. */
1750
1751 int
1752 pure_call_p (rtx insn)
1753 {
1754 rtx link;
1755
1756 if (!CALL_P (insn) || ! CONST_OR_PURE_CALL_P (insn))
1757 return 0;
1758
1759 /* Look for the note that differentiates const and pure functions. */
1760 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1761 {
1762 rtx u, m;
1763
1764 if (GET_CODE (u = XEXP (link, 0)) == USE
1765 && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
1766 && GET_CODE (XEXP (m, 0)) == SCRATCH)
1767 return 1;
1768 }
1769
1770 return 0;
1771 }
1772 \f
1773 /* Remove register note NOTE from the REG_NOTES of INSN. */
1774
1775 void
1776 remove_note (rtx insn, rtx note)
1777 {
1778 rtx link;
1779
1780 if (note == NULL_RTX)
1781 return;
1782
1783 if (REG_NOTES (insn) == note)
1784 {
1785 REG_NOTES (insn) = XEXP (note, 1);
1786 return;
1787 }
1788
1789 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1790 if (XEXP (link, 1) == note)
1791 {
1792 XEXP (link, 1) = XEXP (note, 1);
1793 return;
1794 }
1795
1796 gcc_unreachable ();
1797 }
1798
1799 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1800 return 1 if it is found. A simple equality test is used to determine if
1801 NODE matches. */
1802
1803 int
1804 in_expr_list_p (rtx listp, rtx node)
1805 {
1806 rtx x;
1807
1808 for (x = listp; x; x = XEXP (x, 1))
1809 if (node == XEXP (x, 0))
1810 return 1;
1811
1812 return 0;
1813 }
1814
1815 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1816 remove that entry from the list if it is found.
1817
1818 A simple equality test is used to determine if NODE matches. */
1819
1820 void
1821 remove_node_from_expr_list (rtx node, rtx *listp)
1822 {
1823 rtx temp = *listp;
1824 rtx prev = NULL_RTX;
1825
1826 while (temp)
1827 {
1828 if (node == XEXP (temp, 0))
1829 {
1830 /* Splice the node out of the list. */
1831 if (prev)
1832 XEXP (prev, 1) = XEXP (temp, 1);
1833 else
1834 *listp = XEXP (temp, 1);
1835
1836 return;
1837 }
1838
1839 prev = temp;
1840 temp = XEXP (temp, 1);
1841 }
1842 }
1843 \f
1844 /* Nonzero if X contains any volatile instructions. These are instructions
1845 which may cause unpredictable machine state, and thus no
1846 instructions should be moved or combined across them. This includes
1847 only volatile asms and UNSPEC_VOLATILE instructions. */
1848
1849 int
1850 volatile_insn_p (rtx x)
1851 {
1852 RTX_CODE code;
1853
1854 code = GET_CODE (x);
1855 switch (code)
1856 {
1857 case LABEL_REF:
1858 case SYMBOL_REF:
1859 case CONST_INT:
1860 case CONST:
1861 case CONST_DOUBLE:
1862 case CONST_VECTOR:
1863 case CC0:
1864 case PC:
1865 case REG:
1866 case SCRATCH:
1867 case CLOBBER:
1868 case ADDR_VEC:
1869 case ADDR_DIFF_VEC:
1870 case CALL:
1871 case MEM:
1872 return 0;
1873
1874 case UNSPEC_VOLATILE:
1875 /* case TRAP_IF: This isn't clear yet. */
1876 return 1;
1877
1878 case ASM_INPUT:
1879 case ASM_OPERANDS:
1880 if (MEM_VOLATILE_P (x))
1881 return 1;
1882
1883 default:
1884 break;
1885 }
1886
1887 /* Recursively scan the operands of this expression. */
1888
1889 {
1890 const char *fmt = GET_RTX_FORMAT (code);
1891 int i;
1892
1893 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1894 {
1895 if (fmt[i] == 'e')
1896 {
1897 if (volatile_insn_p (XEXP (x, i)))
1898 return 1;
1899 }
1900 else if (fmt[i] == 'E')
1901 {
1902 int j;
1903 for (j = 0; j < XVECLEN (x, i); j++)
1904 if (volatile_insn_p (XVECEXP (x, i, j)))
1905 return 1;
1906 }
1907 }
1908 }
1909 return 0;
1910 }
1911
1912 /* Nonzero if X contains any volatile memory references
1913 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
1914
1915 int
1916 volatile_refs_p (rtx x)
1917 {
1918 RTX_CODE code;
1919
1920 code = GET_CODE (x);
1921 switch (code)
1922 {
1923 case LABEL_REF:
1924 case SYMBOL_REF:
1925 case CONST_INT:
1926 case CONST:
1927 case CONST_DOUBLE:
1928 case CONST_VECTOR:
1929 case CC0:
1930 case PC:
1931 case REG:
1932 case SCRATCH:
1933 case CLOBBER:
1934 case ADDR_VEC:
1935 case ADDR_DIFF_VEC:
1936 return 0;
1937
1938 case UNSPEC_VOLATILE:
1939 return 1;
1940
1941 case MEM:
1942 case ASM_INPUT:
1943 case ASM_OPERANDS:
1944 if (MEM_VOLATILE_P (x))
1945 return 1;
1946
1947 default:
1948 break;
1949 }
1950
1951 /* Recursively scan the operands of this expression. */
1952
1953 {
1954 const char *fmt = GET_RTX_FORMAT (code);
1955 int i;
1956
1957 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1958 {
1959 if (fmt[i] == 'e')
1960 {
1961 if (volatile_refs_p (XEXP (x, i)))
1962 return 1;
1963 }
1964 else if (fmt[i] == 'E')
1965 {
1966 int j;
1967 for (j = 0; j < XVECLEN (x, i); j++)
1968 if (volatile_refs_p (XVECEXP (x, i, j)))
1969 return 1;
1970 }
1971 }
1972 }
1973 return 0;
1974 }
1975
1976 /* Similar to above, except that it also rejects register pre- and post-
1977 incrementing. */
1978
1979 int
1980 side_effects_p (rtx x)
1981 {
1982 RTX_CODE code;
1983
1984 code = GET_CODE (x);
1985 switch (code)
1986 {
1987 case LABEL_REF:
1988 case SYMBOL_REF:
1989 case CONST_INT:
1990 case CONST:
1991 case CONST_DOUBLE:
1992 case CONST_VECTOR:
1993 case CC0:
1994 case PC:
1995 case REG:
1996 case SCRATCH:
1997 case ADDR_VEC:
1998 case ADDR_DIFF_VEC:
1999 return 0;
2000
2001 case CLOBBER:
2002 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2003 when some combination can't be done. If we see one, don't think
2004 that we can simplify the expression. */
2005 return (GET_MODE (x) != VOIDmode);
2006
2007 case PRE_INC:
2008 case PRE_DEC:
2009 case POST_INC:
2010 case POST_DEC:
2011 case PRE_MODIFY:
2012 case POST_MODIFY:
2013 case CALL:
2014 case UNSPEC_VOLATILE:
2015 /* case TRAP_IF: This isn't clear yet. */
2016 return 1;
2017
2018 case MEM:
2019 case ASM_INPUT:
2020 case ASM_OPERANDS:
2021 if (MEM_VOLATILE_P (x))
2022 return 1;
2023
2024 default:
2025 break;
2026 }
2027
2028 /* Recursively scan the operands of this expression. */
2029
2030 {
2031 const char *fmt = GET_RTX_FORMAT (code);
2032 int i;
2033
2034 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2035 {
2036 if (fmt[i] == 'e')
2037 {
2038 if (side_effects_p (XEXP (x, i)))
2039 return 1;
2040 }
2041 else if (fmt[i] == 'E')
2042 {
2043 int j;
2044 for (j = 0; j < XVECLEN (x, i); j++)
2045 if (side_effects_p (XVECEXP (x, i, j)))
2046 return 1;
2047 }
2048 }
2049 }
2050 return 0;
2051 }
2052 \f
2053 enum may_trap_p_flags
2054 {
2055 MTP_UNALIGNED_MEMS = 1,
2056 MTP_AFTER_MOVE = 2
2057 };
2058 /* Return nonzero if evaluating rtx X might cause a trap.
2059 (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
2060 unaligned memory accesses on strict alignment machines. If
2061 (FLAGS & AFTER_MOVE) is true, returns nonzero even in case the expression
2062 cannot trap at its current location, but it might become trapping if moved
2063 elsewhere. */
2064
2065 static int
2066 may_trap_p_1 (rtx x, unsigned flags)
2067 {
2068 int i;
2069 enum rtx_code code;
2070 const char *fmt;
2071 bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
2072
2073 if (x == 0)
2074 return 0;
2075 code = GET_CODE (x);
2076 switch (code)
2077 {
2078 /* Handle these cases quickly. */
2079 case CONST_INT:
2080 case CONST_DOUBLE:
2081 case CONST_VECTOR:
2082 case SYMBOL_REF:
2083 case LABEL_REF:
2084 case CONST:
2085 case PC:
2086 case CC0:
2087 case REG:
2088 case SCRATCH:
2089 return 0;
2090
2091 case ASM_INPUT:
2092 case UNSPEC_VOLATILE:
2093 case TRAP_IF:
2094 return 1;
2095
2096 case ASM_OPERANDS:
2097 return MEM_VOLATILE_P (x);
2098
2099 /* Memory ref can trap unless it's a static var or a stack slot. */
2100 case MEM:
2101 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2102 reference; moving it out of a condition might cause its address
2103 to become invalid. */
2104 !(flags & MTP_AFTER_MOVE)
2105 && MEM_NOTRAP_P (x)
2106 && (!STRICT_ALIGNMENT || !unaligned_mems))
2107 return 0;
2108 return
2109 rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
2110
2111 /* Division by a non-constant might trap. */
2112 case DIV:
2113 case MOD:
2114 case UDIV:
2115 case UMOD:
2116 if (HONOR_SNANS (GET_MODE (x)))
2117 return 1;
2118 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2119 return flag_trapping_math;
2120 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2121 return 1;
2122 break;
2123
2124 case EXPR_LIST:
2125 /* An EXPR_LIST is used to represent a function call. This
2126 certainly may trap. */
2127 return 1;
2128
2129 case GE:
2130 case GT:
2131 case LE:
2132 case LT:
2133 case LTGT:
2134 case COMPARE:
2135 /* Some floating point comparisons may trap. */
2136 if (!flag_trapping_math)
2137 break;
2138 /* ??? There is no machine independent way to check for tests that trap
2139 when COMPARE is used, though many targets do make this distinction.
2140 For instance, sparc uses CCFPE for compares which generate exceptions
2141 and CCFP for compares which do not generate exceptions. */
2142 if (HONOR_NANS (GET_MODE (x)))
2143 return 1;
2144 /* But often the compare has some CC mode, so check operand
2145 modes as well. */
2146 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2147 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2148 return 1;
2149 break;
2150
2151 case EQ:
2152 case NE:
2153 if (HONOR_SNANS (GET_MODE (x)))
2154 return 1;
2155 /* Often comparison is CC mode, so check operand modes. */
2156 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2157 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2158 return 1;
2159 break;
2160
2161 case FIX:
2162 /* Conversion of floating point might trap. */
2163 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2164 return 1;
2165 break;
2166
2167 case NEG:
2168 case ABS:
2169 case SUBREG:
2170 /* These operations don't trap even with floating point. */
2171 break;
2172
2173 default:
2174 /* Any floating arithmetic may trap. */
2175 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2176 && flag_trapping_math)
2177 return 1;
2178 }
2179
2180 fmt = GET_RTX_FORMAT (code);
2181 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2182 {
2183 if (fmt[i] == 'e')
2184 {
2185 if (may_trap_p_1 (XEXP (x, i), flags))
2186 return 1;
2187 }
2188 else if (fmt[i] == 'E')
2189 {
2190 int j;
2191 for (j = 0; j < XVECLEN (x, i); j++)
2192 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2193 return 1;
2194 }
2195 }
2196 return 0;
2197 }
2198
2199 /* Return nonzero if evaluating rtx X might cause a trap. */
2200
2201 int
2202 may_trap_p (rtx x)
2203 {
2204 return may_trap_p_1 (x, 0);
2205 }
2206
2207 /* Return nonzero if evaluating rtx X might cause a trap, when the expression
2208 is moved from its current location by some optimization. */
2209
2210 int
2211 may_trap_after_code_motion_p (rtx x)
2212 {
2213 return may_trap_p_1 (x, MTP_AFTER_MOVE);
2214 }
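/* Illustrative sketch, not part of the original file: the difference
   between may_trap_p and may_trap_after_code_motion_p shows up for a
   MEM whose MEM_NOTRAP_P flag is set because it is known safe at its
   current (e.g. guarded) location.  The pseudo register number below is
   hypothetical.  */
#if 0
static void
may_trap_after_code_motion_example (void)
{
  rtx addr = gen_rtx_REG (Pmode, 100);	/* some pointer pseudo */
  rtx mem = gen_rtx_MEM (SImode, addr);
  MEM_NOTRAP_P (mem) = 1;		/* safe where it currently stands */

  /* Not trapping in place, but hoisting it above its guard could make the
     address invalid, so the stronger predicate still says "may trap".  */
  gcc_assert (!may_trap_p (mem));
  gcc_assert (may_trap_after_code_motion_p (mem));
}
#endif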
2215
2216 /* Same as above, but additionally return nonzero if evaluating rtx X might
2217 cause a fault. We define a fault for the purpose of this function as an
2218 erroneous execution condition that cannot be encountered during the normal
2219 execution of a valid program; the typical example is an unaligned memory
2220 access on a strict alignment machine. The compiler guarantees that it
2221 doesn't generate code that will fault from a valid program, but this
2222 guarantee doesn't mean anything for individual instructions. Consider
2223 the following example:
2224
2225 struct S { int d; union { char *cp; int *ip; }; };
2226
2227 int foo(struct S *s)
2228 {
2229 if (s->d == 1)
2230 return *s->ip;
2231 else
2232 return *s->cp;
2233 }
2234
2235 on a strict alignment machine. In a valid program, foo will never be
2236 invoked on a structure for which d is equal to 1 and the underlying
2237 unique field of the union is not aligned on a 4-byte boundary, but the
2238 expression *s->ip might cause a fault if considered individually.
2239
2240 At the RTL level, potentially problematic expressions will almost always
2241 satisfy may_trap_p; for example, the above dereference can be emitted as
2242 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2243 However, suppose that foo is inlined in a caller that causes s->cp to
2244 point to a local character variable and guarantees that s->d is not set
2245 to 1; foo may have been effectively translated into pseudo-RTL as:
2246
2247 if ((reg:SI) == 1)
2248 (set (reg:SI) (mem:SI (%fp - 7)))
2249 else
2250 (set (reg:QI) (mem:QI (%fp - 7)))
2251
2252 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2253 memory reference to a stack slot, but it will certainly cause a fault
2254 on a strict alignment machine. */
2255
2256 int
2257 may_trap_or_fault_p (rtx x)
2258 {
2259 return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
2260 }
2261 \f
2262 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2263 i.e., an inequality. */
2264
2265 int
2266 inequality_comparisons_p (rtx x)
2267 {
2268 const char *fmt;
2269 int len, i;
2270 enum rtx_code code = GET_CODE (x);
2271
2272 switch (code)
2273 {
2274 case REG:
2275 case SCRATCH:
2276 case PC:
2277 case CC0:
2278 case CONST_INT:
2279 case CONST_DOUBLE:
2280 case CONST_VECTOR:
2281 case CONST:
2282 case LABEL_REF:
2283 case SYMBOL_REF:
2284 return 0;
2285
2286 case LT:
2287 case LTU:
2288 case GT:
2289 case GTU:
2290 case LE:
2291 case LEU:
2292 case GE:
2293 case GEU:
2294 return 1;
2295
2296 default:
2297 break;
2298 }
2299
2300 len = GET_RTX_LENGTH (code);
2301 fmt = GET_RTX_FORMAT (code);
2302
2303 for (i = 0; i < len; i++)
2304 {
2305 if (fmt[i] == 'e')
2306 {
2307 if (inequality_comparisons_p (XEXP (x, i)))
2308 return 1;
2309 }
2310 else if (fmt[i] == 'E')
2311 {
2312 int j;
2313 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2314 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2315 return 1;
2316 }
2317 }
2318
2319 return 0;
2320 }
2321 \f
2322 /* Replace any occurrence of FROM in X with TO. The function does
2323 not enter into CONST_DOUBLEs when replacing.
2324
2325 Note that copying is not done so X must not be shared unless all copies
2326 are to be modified. */
2327
2328 rtx
2329 replace_rtx (rtx x, rtx from, rtx to)
2330 {
2331 int i, j;
2332 const char *fmt;
2333
2334 /* The following prevents loops from occurring when we change a MEM in
2335 a CONST_DOUBLE into the same CONST_DOUBLE. */
2336 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
2337 return x;
2338
2339 if (x == from)
2340 return to;
2341
2342 /* Allow this function to make replacements in EXPR_LISTs. */
2343 if (x == 0)
2344 return 0;
2345
2346 if (GET_CODE (x) == SUBREG)
2347 {
2348 rtx new = replace_rtx (SUBREG_REG (x), from, to);
2349
2350 if (GET_CODE (new) == CONST_INT)
2351 {
2352 x = simplify_subreg (GET_MODE (x), new,
2353 GET_MODE (SUBREG_REG (x)),
2354 SUBREG_BYTE (x));
2355 gcc_assert (x);
2356 }
2357 else
2358 SUBREG_REG (x) = new;
2359
2360 return x;
2361 }
2362 else if (GET_CODE (x) == ZERO_EXTEND)
2363 {
2364 rtx new = replace_rtx (XEXP (x, 0), from, to);
2365
2366 if (GET_CODE (new) == CONST_INT)
2367 {
2368 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2369 new, GET_MODE (XEXP (x, 0)));
2370 gcc_assert (x);
2371 }
2372 else
2373 XEXP (x, 0) = new;
2374
2375 return x;
2376 }
2377
2378 fmt = GET_RTX_FORMAT (GET_CODE (x));
2379 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2380 {
2381 if (fmt[i] == 'e')
2382 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2383 else if (fmt[i] == 'E')
2384 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2385 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2386 }
2387
2388 return x;
2389 }
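/* A minimal usage sketch, not part of the original file: FROM is matched
   by pointer identity, so it must be the very rtx contained in X.  When
   the replacement is a CONST_INT, the SUBREG and ZERO_EXTEND cases above
   fold the result immediately.  The pseudo register number is
   hypothetical.  */
#if 0
static rtx
replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (QImode, 100);
  rtx x = gen_rtx_ZERO_EXTEND (SImode, reg);

  /* Yields (const_int 5), folded via simplify_unary_operation.  */
  return replace_rtx (x, reg, GEN_INT (5));
}
#endif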
2390 \f
2391 /* Replace occurrences of the old label in *X with the new one.
2392 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2393
2394 int
2395 replace_label (rtx *x, void *data)
2396 {
2397 rtx l = *x;
2398 rtx old_label = ((replace_label_data *) data)->r1;
2399 rtx new_label = ((replace_label_data *) data)->r2;
2400 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2401
2402 if (l == NULL_RTX)
2403 return 0;
2404
2405 if (GET_CODE (l) == SYMBOL_REF
2406 && CONSTANT_POOL_ADDRESS_P (l))
2407 {
2408 rtx c = get_pool_constant (l);
2409 if (rtx_referenced_p (old_label, c))
2410 {
2411 rtx new_c, new_l;
2412 replace_label_data *d = (replace_label_data *) data;
2413
2414 /* Create a copy of constant C; replace the label inside
2415 but do not update LABEL_NUSES because uses in constant pool
2416 are not counted. */
2417 new_c = copy_rtx (c);
2418 d->update_label_nuses = false;
2419 for_each_rtx (&new_c, replace_label, data);
2420 d->update_label_nuses = update_label_nuses;
2421
2422 /* Add the new constant NEW_C to the constant pool and replace
2423 the old reference to the constant with the new reference. */
2424 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2425 *x = replace_rtx (l, l, new_l);
2426 }
2427 return 0;
2428 }
2429
2430 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2431 field. This is not handled by for_each_rtx because it doesn't
2432 handle unprinted ('0') fields. */
2433 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2434 JUMP_LABEL (l) = new_label;
2435
2436 if ((GET_CODE (l) == LABEL_REF
2437 || GET_CODE (l) == INSN_LIST)
2438 && XEXP (l, 0) == old_label)
2439 {
2440 XEXP (l, 0) = new_label;
2441 if (update_label_nuses)
2442 {
2443 ++LABEL_NUSES (new_label);
2444 --LABEL_NUSES (old_label);
2445 }
2446 return 0;
2447 }
2448
2449 return 0;
2450 }
2451
2452 /* Return nonzero when *BODY is equal to X or when X is directly referenced
2453 by *BODY, so that FOR_EACH_RTX stops traversing and returns nonzero too;
2454 otherwise FOR_EACH_RTX continues traversing *BODY. */
2455
2456 static int
2457 rtx_referenced_p_1 (rtx *body, void *x)
2458 {
2459 rtx y = (rtx) x;
2460
2461 if (*body == NULL_RTX)
2462 return y == NULL_RTX;
2463
2464 /* Return true if a label_ref *BODY refers to label Y. */
2465 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2466 return XEXP (*body, 0) == y;
2467
2468 /* If *BODY is a reference to a pool constant, traverse the constant. */
2469 if (GET_CODE (*body) == SYMBOL_REF
2470 && CONSTANT_POOL_ADDRESS_P (*body))
2471 return rtx_referenced_p (y, get_pool_constant (*body));
2472
2473 /* By default, compare the RTL expressions. */
2474 return rtx_equal_p (*body, y);
2475 }
2476
2477 /* Return true if X is referenced in BODY. */
2478
2479 int
2480 rtx_referenced_p (rtx x, rtx body)
2481 {
2482 return for_each_rtx (&body, rtx_referenced_p_1, x);
2483 }
2484
2485 /* If INSN is a tablejump, return true and store the label preceding the jump
2486 table in *LABELP and the jump table itself in *TABLEP. LABELP and TABLEP may be NULL. */
2487
2488 bool
2489 tablejump_p (rtx insn, rtx *labelp, rtx *tablep)
2490 {
2491 rtx label, table;
2492
2493 if (JUMP_P (insn)
2494 && (label = JUMP_LABEL (insn)) != NULL_RTX
2495 && (table = next_active_insn (label)) != NULL_RTX
2496 && JUMP_P (table)
2497 && (GET_CODE (PATTERN (table)) == ADDR_VEC
2498 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
2499 {
2500 if (labelp)
2501 *labelp = label;
2502 if (tablep)
2503 *tablep = table;
2504 return true;
2505 }
2506 return false;
2507 }
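/* For illustration (not from the original sources), a dispatch sequence
   recognized by tablejump_p typically looks like

     (jump_insn ... (set (pc) ...))          with JUMP_LABEL pointing to L
     (code_label L)
     (jump_insn (addr_vec [(label_ref L1) (label_ref L2) ...]))

   Given the first insn, *LABELP is set to the code_label L and *TABLEP to
   the ADDR_VEC (or ADDR_DIFF_VEC) insn that is the next active insn after
   it.  The labels L, L1 and L2 are hypothetical.  */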
2508
2509 /* A subroutine of computed_jump_p. Return 1 if X contains a REG, MEM or
2510 constant that is not in the constant pool and not in the condition
2511 of an IF_THEN_ELSE. */
2512
2513 static int
2514 computed_jump_p_1 (rtx x)
2515 {
2516 enum rtx_code code = GET_CODE (x);
2517 int i, j;
2518 const char *fmt;
2519
2520 switch (code)
2521 {
2522 case LABEL_REF:
2523 case PC:
2524 return 0;
2525
2526 case CONST:
2527 case CONST_INT:
2528 case CONST_DOUBLE:
2529 case CONST_VECTOR:
2530 case SYMBOL_REF:
2531 case REG:
2532 return 1;
2533
2534 case MEM:
2535 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2536 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2537
2538 case IF_THEN_ELSE:
2539 return (computed_jump_p_1 (XEXP (x, 1))
2540 || computed_jump_p_1 (XEXP (x, 2)));
2541
2542 default:
2543 break;
2544 }
2545
2546 fmt = GET_RTX_FORMAT (code);
2547 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2548 {
2549 if (fmt[i] == 'e'
2550 && computed_jump_p_1 (XEXP (x, i)))
2551 return 1;
2552
2553 else if (fmt[i] == 'E')
2554 for (j = 0; j < XVECLEN (x, i); j++)
2555 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2556 return 1;
2557 }
2558
2559 return 0;
2560 }
2561
2562 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2563
2564 Tablejumps and casesi insns are not considered indirect jumps;
2565 we can recognize them by a (use (label_ref)). */
2566
2567 int
2568 computed_jump_p (rtx insn)
2569 {
2570 int i;
2571 if (JUMP_P (insn))
2572 {
2573 rtx pat = PATTERN (insn);
2574
2575 if (find_reg_note (insn, REG_LABEL, NULL_RTX))
2576 return 0;
2577 else if (GET_CODE (pat) == PARALLEL)
2578 {
2579 int len = XVECLEN (pat, 0);
2580 int has_use_labelref = 0;
2581
2582 for (i = len - 1; i >= 0; i--)
2583 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2584 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2585 == LABEL_REF))
2586 has_use_labelref = 1;
2587
2588 if (! has_use_labelref)
2589 for (i = len - 1; i >= 0; i--)
2590 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2591 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2592 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2593 return 1;
2594 }
2595 else if (GET_CODE (pat) == SET
2596 && SET_DEST (pat) == pc_rtx
2597 && computed_jump_p_1 (SET_SRC (pat)))
2598 return 1;
2599 }
2600 return 0;
2601 }
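/* Illustrative examples, not part of the original file: a jump_insn whose
   pattern is (set (pc) (reg:P 100)) or (set (pc) (mem:P (reg:P 100))) is a
   computed jump, while (set (pc) (label_ref L)) or the IF_THEN_ELSE form of
   a conditional branch is not, because the target there is a known label.
   The register number and label L are hypothetical.  */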
2602
2603 /* Optimized inner loop of for_each_rtx that tries to avoid useless recursive
2604 calls. Processes the subexpressions of EXP and passes them to F. */
2605 static int
2606 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2607 {
2608 int result, i, j;
2609 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2610 rtx *x;
2611
2612 for (; format[n] != '\0'; n++)
2613 {
2614 switch (format[n])
2615 {
2616 case 'e':
2617 /* Call F on X. */
2618 x = &XEXP (exp, n);
2619 result = (*f) (x, data);
2620 if (result == -1)
2621 /* Do not traverse sub-expressions. */
2622 continue;
2623 else if (result != 0)
2624 /* Stop the traversal. */
2625 return result;
2626
2627 if (*x == NULL_RTX)
2628 /* There are no sub-expressions. */
2629 continue;
2630
2631 i = non_rtx_starting_operands[GET_CODE (*x)];
2632 if (i >= 0)
2633 {
2634 result = for_each_rtx_1 (*x, i, f, data);
2635 if (result != 0)
2636 return result;
2637 }
2638 break;
2639
2640 case 'V':
2641 case 'E':
2642 if (XVEC (exp, n) == 0)
2643 continue;
2644 for (j = 0; j < XVECLEN (exp, n); ++j)
2645 {
2646 /* Call F on X. */
2647 x = &XVECEXP (exp, n, j);
2648 result = (*f) (x, data);
2649 if (result == -1)
2650 /* Do not traverse sub-expressions. */
2651 continue;
2652 else if (result != 0)
2653 /* Stop the traversal. */
2654 return result;
2655
2656 if (*x == NULL_RTX)
2657 /* There are no sub-expressions. */
2658 continue;
2659
2660 i = non_rtx_starting_operands[GET_CODE (*x)];
2661 if (i >= 0)
2662 {
2663 result = for_each_rtx_1 (*x, i, f, data);
2664 if (result != 0)
2665 return result;
2666 }
2667 }
2668 break;
2669
2670 default:
2671 /* Nothing to do. */
2672 break;
2673 }
2674 }
2675
2676 return 0;
2677 }
2678
2679 /* Traverse X via depth-first search, calling F for each
2680 sub-expression (including X itself). F is also passed the DATA.
2681 If F returns -1, do not traverse sub-expressions, but continue
2682 traversing the rest of the tree. If F ever returns any other
2683 nonzero value, stop the traversal, and return the value returned
2684 by F. Otherwise, return 0. This function does not traverse inside
2685 tree structure that contains RTX_EXPRs, or into sub-expressions
2686 whose format code is `0' since it is not known whether or not those
2687 codes are actually RTL.
2688
2689 This routine is very general, and could (should?) be used to
2690 implement many of the other routines in this file. */
2691
2692 int
2693 for_each_rtx (rtx *x, rtx_function f, void *data)
2694 {
2695 int result;
2696 int i;
2697
2698 /* Call F on X. */
2699 result = (*f) (x, data);
2700 if (result == -1)
2701 /* Do not traverse sub-expressions. */
2702 return 0;
2703 else if (result != 0)
2704 /* Stop the traversal. */
2705 return result;
2706
2707 if (*x == NULL_RTX)
2708 /* There are no sub-expressions. */
2709 return 0;
2710
2711 i = non_rtx_starting_operands[GET_CODE (*x)];
2712 if (i < 0)
2713 return 0;
2714
2715 return for_each_rtx_1 (*x, i, f, data);
2716 }
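/* A minimal usage sketch, not part of the original file: a for_each_rtx
   callback that counts MEM sub-expressions.  Returning -1 from the
   callback would skip the sub-expressions of the current rtx; any other
   nonzero return value would abort the walk and be propagated.  */
#if 0
static int
count_mems_1 (rtx *loc, void *data)
{
  if (*loc && MEM_P (*loc))
    ++*(int *) data;
  return 0;	/* keep walking */
}

static int
count_mems (rtx x)
{
  int count = 0;
  for_each_rtx (&x, count_mems_1, &count);
  return count;
}
#endif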
2717
2718
2719 /* Searches X for any reference to REGNO, returning the rtx of the
2720 reference found if any. Otherwise, returns NULL_RTX. */
2721
2722 rtx
2723 regno_use_in (unsigned int regno, rtx x)
2724 {
2725 const char *fmt;
2726 int i, j;
2727 rtx tem;
2728
2729 if (REG_P (x) && REGNO (x) == regno)
2730 return x;
2731
2732 fmt = GET_RTX_FORMAT (GET_CODE (x));
2733 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2734 {
2735 if (fmt[i] == 'e')
2736 {
2737 if ((tem = regno_use_in (regno, XEXP (x, i))))
2738 return tem;
2739 }
2740 else if (fmt[i] == 'E')
2741 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2742 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
2743 return tem;
2744 }
2745
2746 return NULL_RTX;
2747 }
2748
2749 /* Return a value indicating whether OP, an operand of a commutative
2750 operation, is preferred as the first or second operand. The higher
2751 the value, the stronger the preference for being the first operand.
2752 We use negative values to indicate a preference for the second operand
2753 and positive values for the first operand. */
2754
2755 int
2756 commutative_operand_precedence (rtx op)
2757 {
2758 enum rtx_code code = GET_CODE (op);
2759
2760 /* Constants always become the second operand. Prefer "nice" constants. */
2761 if (code == CONST_INT)
2762 return -7;
2763 if (code == CONST_DOUBLE)
2764 return -6;
2765 op = avoid_constant_pool_reference (op);
2766 code = GET_CODE (op);
2767
2768 switch (GET_RTX_CLASS (code))
2769 {
2770 case RTX_CONST_OBJ:
2771 if (code == CONST_INT)
2772 return -5;
2773 if (code == CONST_DOUBLE)
2774 return -4;
2775 return -3;
2776
2777 case RTX_EXTRA:
2778 /* SUBREGs of objects should come second. */
2779 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
2780 return -2;
2781
2782 if (!CONSTANT_P (op))
2783 return 0;
2784 else
2785 /* As for RTX_CONST_OBJ. */
2786 return -3;
2787
2788 case RTX_OBJ:
2789 /* Complex expressions should come first, so decrease the priority
2790 of objects. */
2791 return -1;
2792
2793 case RTX_COMM_ARITH:
2794 /* Prefer operands that are themselves commutative to be first.
2795 This helps to make things linear. In particular,
2796 (and (and (reg) (reg)) (not (reg))) is canonical. */
2797 return 4;
2798
2799 case RTX_BIN_ARITH:
2800 /* If only one operand is a binary expression, it will be the first
2801 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
2802 is canonical, although it will usually be further simplified. */
2803 return 2;
2804
2805 case RTX_UNARY:
2806 /* Then prefer NEG and NOT. */
2807 if (code == NEG || code == NOT)
2808 return 1;
2809
2810 default:
2811 return 0;
2812 }
2813 }
2814
2815 /* Return 1 iff it is necessary to swap the operands of a commutative operation
2816 in order to canonicalize the expression. */
2817
2818 int
2819 swap_commutative_operands_p (rtx x, rtx y)
2820 {
2821 return (commutative_operand_precedence (x)
2822 < commutative_operand_precedence (y));
2823 }
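/* Worked example, not part of the original file: in
   (plus:SI (const_int 4) (reg:SI 100)) the CONST_INT has precedence -7
   and the REG (an RTX_OBJ) has precedence -1, so
   swap_commutative_operands_p returns nonzero for this operand order and
   canonicalization produces (plus:SI (reg:SI 100) (const_int 4)).  The
   pseudo register number is hypothetical.  */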
2824
2825 /* Return 1 if X is an autoincrement side effect and the register is
2826 not the stack pointer. */
2827 int
2828 auto_inc_p (rtx x)
2829 {
2830 switch (GET_CODE (x))
2831 {
2832 case PRE_INC:
2833 case POST_INC:
2834 case PRE_DEC:
2835 case POST_DEC:
2836 case PRE_MODIFY:
2837 case POST_MODIFY:
2838 /* There are no REG_INC notes for SP. */
2839 if (XEXP (x, 0) != stack_pointer_rtx)
2840 return 1;
2841 default:
2842 break;
2843 }
2844 return 0;
2845 }
2846
2847 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
2848 int
2849 loc_mentioned_in_p (rtx *loc, rtx in)
2850 {
2851 enum rtx_code code;
2852 const char *fmt;
2853 int i, j;
2854
2855 if (!in)
2856 return 0;
2857
2858 code = GET_CODE (in);
2859 fmt = GET_RTX_FORMAT (code);
2860 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2861 {
2862 if (loc == &in->u.fld[i].rt_rtx)
2863 return 1;
2864 if (fmt[i] == 'e')
2865 {
2866 if (loc_mentioned_in_p (loc, XEXP (in, i)))
2867 return 1;
2868 }
2869 else if (fmt[i] == 'E')
2870 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
2871 if (loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
2872 return 1;
2873 }
2874 return 0;
2875 }
2876
2877 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
2878 and SUBREG_BYTE, return the bit offset where the subreg begins
2879 (counting from the least significant bit of the operand). */
2880
2881 unsigned int
2882 subreg_lsb_1 (enum machine_mode outer_mode,
2883 enum machine_mode inner_mode,
2884 unsigned int subreg_byte)
2885 {
2886 unsigned int bitpos;
2887 unsigned int byte;
2888 unsigned int word;
2889
2890 /* A paradoxical subreg begins at bit position 0. */
2891 if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
2892 return 0;
2893
2894 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
2895 /* If the subreg crosses a word boundary ensure that
2896 it also begins and ends on a word boundary. */
2897 gcc_assert (!((subreg_byte % UNITS_PER_WORD
2898 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
2899 && (subreg_byte % UNITS_PER_WORD
2900 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
2901
2902 if (WORDS_BIG_ENDIAN)
2903 word = (GET_MODE_SIZE (inner_mode)
2904 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
2905 else
2906 word = subreg_byte / UNITS_PER_WORD;
2907 bitpos = word * BITS_PER_WORD;
2908
2909 if (BYTES_BIG_ENDIAN)
2910 byte = (GET_MODE_SIZE (inner_mode)
2911 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
2912 else
2913 byte = subreg_byte % UNITS_PER_WORD;
2914 bitpos += byte * BITS_PER_UNIT;
2915
2916 return bitpos;
2917 }
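/* Worked example, not part of the original file: for
   (subreg:QI (reg:SI ...) 2) on a 32-bit target,
   subreg_lsb_1 (QImode, SImode, 2) returns 16 when neither
   BYTES_BIG_ENDIAN nor WORDS_BIG_ENDIAN is set (byte 2 holds bits
   16..23), and 8 when both are set (byte 2 then holds bits 8..15).  */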
2918
2919 /* Given a subreg X, return the bit offset where the subreg begins
2920 (counting from the least significant bit of the reg). */
2921
2922 unsigned int
2923 subreg_lsb (rtx x)
2924 {
2925 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
2926 SUBREG_BYTE (x));
2927 }
2928
2929 /* This function returns the regno offset of a subreg expression.
2930 xregno - A regno of an inner hard subreg_reg (or what will become one).
2931 xmode - The mode of xregno.
2932 offset - The byte offset.
2933 ymode - The mode of a top level SUBREG (or what may become one).
2934 RETURN - The regno offset which would be used. */
2935 unsigned int
2936 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
2937 unsigned int offset, enum machine_mode ymode)
2938 {
2939 int nregs_xmode, nregs_ymode;
2940 int mode_multiple, nregs_multiple;
2941 int y_offset;
2942
2943 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
2944
2945 /* Adjust nregs_xmode to allow for 'holes'. */
2946 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
2947 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
2948 else
2949 nregs_xmode = hard_regno_nregs[xregno][xmode];
2950
2951 nregs_ymode = hard_regno_nregs[xregno][ymode];
2952
2953 /* If this is a big endian paradoxical subreg, which uses more actual
2954 hard registers than the original register, we must return a negative
2955 offset so that we find the proper highpart of the register. */
2956 if (offset == 0
2957 && nregs_ymode > nregs_xmode
2958 && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
2959 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
2960 return nregs_xmode - nregs_ymode;
2961
2962 if (offset == 0 || nregs_xmode == nregs_ymode)
2963 return 0;
2964
2965 /* Size of ymode must not be greater than the size of xmode. */
2966 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
2967 gcc_assert (mode_multiple != 0);
2968
2969 y_offset = offset / GET_MODE_SIZE (ymode);
2970 nregs_multiple = nregs_xmode / nregs_ymode;
2971 return (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
2972 }
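/* Worked example, not part of the original file: on a hypothetical
   32-bit little-endian target where DImode occupies two consecutive hard
   registers and SImode one,

     subreg_regno_offset (R, DImode, 0, SImode) == 0
     subreg_regno_offset (R, DImode, 4, SImode) == 1

   i.e. the SImode part at byte offset 4 lives in the second of the two
   registers.  R stands for any hard register number for which those
   register counts hold.  */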
2973
2974 /* This function returns true when the offset is representable via
2975 subreg_offset in the given regno.
2976 xregno - A regno of an inner hard subreg_reg (or what will become one).
2977 xmode - The mode of xregno.
2978 offset - The byte offset.
2979 ymode - The mode of a top level SUBREG (or what may become one).
2980 RETURN - Whether the offset is representable. */
2981 bool
2982 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
2983 unsigned int offset, enum machine_mode ymode)
2984 {
2985 int nregs_xmode, nregs_ymode;
2986 int mode_multiple, nregs_multiple;
2987 int y_offset;
2988 int regsize_xmode, regsize_ymode;
2989
2990 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
2991
2992 /* If there are holes in a non-scalar mode in registers, we expect
2993 that it is made up of its units concatenated together. */
2994 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
2995 {
2996 enum machine_mode xmode_unit;
2997
2998 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
2999 if (GET_MODE_INNER (xmode) == VOIDmode)
3000 xmode_unit = xmode;
3001 else
3002 xmode_unit = GET_MODE_INNER (xmode);
3003 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3004 gcc_assert (nregs_xmode
3005 == (GET_MODE_NUNITS (xmode)
3006 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3007 gcc_assert (hard_regno_nregs[xregno][xmode]
3008 == (hard_regno_nregs[xregno][xmode_unit]
3009 * GET_MODE_NUNITS (xmode)));
3010
3011 /* You can only ask for a SUBREG of a value with holes in the middle
3012 if you don't cross the holes. (Such a SUBREG should be done by
3013 picking a different register class, or doing it in memory if
3014 necessary.) An example of a value with holes is XCmode on 32-bit
3015 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3016 3 for each part, but in memory it's two 128-bit parts.
3017 Padding is assumed to be at the end (not necessarily the 'high part')
3018 of each unit. */
3019 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3020 < GET_MODE_NUNITS (xmode))
3021 && (offset / GET_MODE_SIZE (xmode_unit)
3022 != ((offset + GET_MODE_SIZE (ymode) - 1)
3023 / GET_MODE_SIZE (xmode_unit))))
3024 return false;
3025 }
3026 else
3027 nregs_xmode = hard_regno_nregs[xregno][xmode];
3028
3029 nregs_ymode = hard_regno_nregs[xregno][ymode];
3030
3031 /* Paradoxical subregs are otherwise valid. */
3032 if (offset == 0
3033 && nregs_ymode > nregs_xmode
3034 && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3035 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
3036 return true;
3037
3038 /* If registers store different numbers of bits in the different
3039 modes, we cannot generally form this subreg. */
3040 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3041 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3042 if (regsize_xmode > regsize_ymode && nregs_ymode > 1)
3043 return false;
3044 if (regsize_ymode > regsize_xmode && nregs_xmode > 1)
3045 return false;
3046
3047 /* Lowpart subregs are otherwise valid. */
3048 if (offset == subreg_lowpart_offset (ymode, xmode))
3049 return true;
3050
3051 /* This should always pass, otherwise we don't know how to verify
3052 the constraint. These conditions may be relaxed but
3053 subreg_regno_offset would need to be redesigned. */
3054 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3055 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3056
3057 /* The XMODE value can be seen as a vector of NREGS_XMODE
3058 values. The subreg must represent a lowpart of a given field.
3059 Compute what field it is. */
3060 offset -= subreg_lowpart_offset (ymode,
3061 mode_for_size (GET_MODE_BITSIZE (xmode)
3062 / nregs_xmode,
3063 MODE_INT, 0));
3064
3065 /* Size of ymode must not be greater than the size of xmode. */
3066 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3067 gcc_assert (mode_multiple != 0);
3068
3069 y_offset = offset / GET_MODE_SIZE (ymode);
3070 nregs_multiple = nregs_xmode / nregs_ymode;
3071
3072 gcc_assert ((offset % GET_MODE_SIZE (ymode)) == 0);
3073 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3074
3075 return (!(y_offset % (mode_multiple / nregs_multiple)));
3076 }
3077
3078 /* Return the final regno that a subreg expression refers to. */
3079 unsigned int
3080 subreg_regno (rtx x)
3081 {
3082 unsigned int ret;
3083 rtx subreg = SUBREG_REG (x);
3084 int regno = REGNO (subreg);
3085
3086 ret = regno + subreg_regno_offset (regno,
3087 GET_MODE (subreg),
3088 SUBREG_BYTE (x),
3089 GET_MODE (x));
3090 return ret;
3091
3092 }
3093 struct parms_set_data
3094 {
3095 int nregs;
3096 HARD_REG_SET regs;
3097 };
3098
3099 /* Helper function for noticing stores to parameter registers. */
3100 static void
3101 parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3102 {
3103 struct parms_set_data *d = data;
3104 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3105 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3106 {
3107 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3108 d->nregs--;
3109 }
3110 }
3111
3112 /* Look backward for the first parameter to be loaded.
3113 Note that loads of all parameters will not necessarily be
3114 found if CSE has eliminated some of them (e.g., an argument
3115 to the outer function is passed down as a parameter).
3116 Do not skip BOUNDARY. */
3117 rtx
3118 find_first_parameter_load (rtx call_insn, rtx boundary)
3119 {
3120 struct parms_set_data parm;
3121 rtx p, before, first_set;
3122
3123 /* Since different machines initialize their parameter registers
3124 in different orders, assume nothing. Collect the set of all
3125 parameter registers. */
3126 CLEAR_HARD_REG_SET (parm.regs);
3127 parm.nregs = 0;
3128 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3129 if (GET_CODE (XEXP (p, 0)) == USE
3130 && REG_P (XEXP (XEXP (p, 0), 0)))
3131 {
3132 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3133
3134 /* We only care about registers which can hold function
3135 arguments. */
3136 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3137 continue;
3138
3139 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3140 parm.nregs++;
3141 }
3142 before = call_insn;
3143 first_set = call_insn;
3144
3145 /* Search backward for the first set of a register in this set. */
3146 while (parm.nregs && before != boundary)
3147 {
3148 before = PREV_INSN (before);
3149
3150 /* It is possible that some loads got CSEed from one call to
3151 another. Stop in that case. */
3152 if (CALL_P (before))
3153 break;
3154
3155 /* Our caller must either ensure that we will find all sets
3156 (in case the code has not been optimized yet), or take care
3157 of possible labels by setting BOUNDARY to the preceding
3158 CODE_LABEL. */
3159 if (LABEL_P (before))
3160 {
3161 gcc_assert (before == boundary);
3162 break;
3163 }
3164
3165 if (INSN_P (before))
3166 {
3167 int nregs_old = parm.nregs;
3168 note_stores (PATTERN (before), parms_set, &parm);
3169 /* If we found something that did not set a parameter reg,
3170 we're done. Do not keep going, as that might result
3171 in hoisting an insn before the setting of a pseudo
3172 that is used by the hoisted insn. */
3173 if (nregs_old != parm.nregs)
3174 first_set = before;
3175 else
3176 break;
3177 }
3178 }
3179 return first_set;
3180 }
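/* Illustrative sketch, not part of the original file: if
   CALL_INSN_FUNCTION_USAGE of CALL_INSN lists (use (reg a0)) and
   (use (reg a1)), and the preceding insns are

     insn A:  (set (reg a1) ...)
     insn B:  (set (reg a0) ...)
     call_insn

   then the backward walk visits B first, then A, and returns A, the
   earliest insn that loads one of the parameter registers.  The register
   names a0 and a1 are hypothetical argument registers.  */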
3181
3182 /* Return true if we should avoid inserting code between INSN and the preceding
3183 call instruction. */
3184
3185 bool
3186 keep_with_call_p (rtx insn)
3187 {
3188 rtx set;
3189
3190 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3191 {
3192 if (REG_P (SET_DEST (set))
3193 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3194 && fixed_regs[REGNO (SET_DEST (set))]
3195 && general_operand (SET_SRC (set), VOIDmode))
3196 return true;
3197 if (REG_P (SET_SRC (set))
3198 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
3199 && REG_P (SET_DEST (set))
3200 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3201 return true;
3202 /* There may be a stack pop just after the call and before the store
3203 of the return register. Search for the actual store when deciding
3204 if we can break or not. */
3205 if (SET_DEST (set) == stack_pointer_rtx)
3206 {
3207 rtx i2 = next_nonnote_insn (insn);
3208 if (i2 && keep_with_call_p (i2))
3209 return true;
3210 }
3211 }
3212 return false;
3213 }
3214
3215 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3216 to non-complex jumps. That is, direct unconditional, conditional,
3217 and tablejumps, but not computed jumps or returns. It also does
3218 not apply to the fallthru case of a conditional jump. */
3219
3220 bool
3221 label_is_jump_target_p (rtx label, rtx jump_insn)
3222 {
3223 rtx tmp = JUMP_LABEL (jump_insn);
3224
3225 if (label == tmp)
3226 return true;
3227
3228 if (tablejump_p (jump_insn, NULL, &tmp))
3229 {
3230 rtvec vec = XVEC (PATTERN (tmp),
3231 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3232 int i, veclen = GET_NUM_ELEM (vec);
3233
3234 for (i = 0; i < veclen; ++i)
3235 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3236 return true;
3237 }
3238
3239 return false;
3240 }
3241
3242 \f
3243 /* Return an estimate of the cost of computing rtx X.
3244 One use is in cse, to decide which expression to keep in the hash table.
3245 Another is in rtl generation, to pick the cheapest way to multiply.
3246 Other uses like the latter are expected in the future. */
3247
3248 int
3249 rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
3250 {
3251 int i, j;
3252 enum rtx_code code;
3253 const char *fmt;
3254 int total;
3255
3256 if (x == 0)
3257 return 0;
3258
3259 /* Compute the default costs of certain things.
3260 Note that targetm.rtx_costs can override the defaults. */
3261
3262 code = GET_CODE (x);
3263 switch (code)
3264 {
3265 case MULT:
3266 total = COSTS_N_INSNS (5);
3267 break;
3268 case DIV:
3269 case UDIV:
3270 case MOD:
3271 case UMOD:
3272 total = COSTS_N_INSNS (7);
3273 break;
3274 case USE:
3275 /* Used in combine.c as a marker. */
3276 total = 0;
3277 break;
3278 default:
3279 total = COSTS_N_INSNS (1);
3280 }
3281
3282 switch (code)
3283 {
3284 case REG:
3285 return 0;
3286
3287 case SUBREG:
3288 total = 0;
3289 /* If we can't tie these modes, make this expensive. The larger
3290 the mode, the more expensive it is. */
3291 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3292 return COSTS_N_INSNS (2
3293 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
3294 break;
3295
3296 default:
3297 if (targetm.rtx_costs (x, code, outer_code, &total))
3298 return total;
3299 break;
3300 }
3301
3302 /* Sum the costs of the sub-rtx's, plus the cost of this operation,
3303 which is already in TOTAL. */
3304
3305 fmt = GET_RTX_FORMAT (code);
3306 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3307 if (fmt[i] == 'e')
3308 total += rtx_cost (XEXP (x, i), code);
3309 else if (fmt[i] == 'E')
3310 for (j = 0; j < XVECLEN (x, i); j++)
3311 total += rtx_cost (XVECEXP (x, i, j), code);
3312
3313 return total;
3314 }
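/* For illustration (not from the original sources): with the default
   costs above and a target hook that declines to override them, a bare
   REG costs 0, a MULT costs COSTS_N_INSNS (5) plus the costs of its
   operands, DIV/UDIV/MOD/UMOD cost COSTS_N_INSNS (7) plus operands, and
   most other codes cost COSTS_N_INSNS (1) plus operands.  */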
3315 \f
3316 /* Return the cost of address expression X.
3317 X is expected to be a properly formed address reference. */
3318
3319 int
3320 address_cost (rtx x, enum machine_mode mode)
3321 {
3322 /* We may be asked for the cost of various unusual addresses, such as operands
3323 of a push instruction. It is not worthwhile to complicate the
3324 target hook with such cases. */
3325
3326 if (!memory_address_p (mode, x))
3327 return 1000;
3328
3329 return targetm.address_cost (x);
3330 }
3331
3332 /* If the target doesn't override, compute the cost as with arithmetic. */
3333
3334 int
3335 default_address_cost (rtx x)
3336 {
3337 return rtx_cost (x, MEM);
3338 }
3339 \f
3340
3341 unsigned HOST_WIDE_INT
3342 nonzero_bits (rtx x, enum machine_mode mode)
3343 {
3344 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3345 }
3346
3347 unsigned int
3348 num_sign_bit_copies (rtx x, enum machine_mode mode)
3349 {
3350 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3351 }
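/* Worked example, not part of the original file: for
   (sign_extend:SI (reg:QI 100)) examined in SImode on a 32-bit target,
   num_sign_bit_copies returns at least 32 - 8 + 1 = 25, because every
   bit above the QImode sign bit is a copy of it.  The pseudo register
   number is hypothetical.  */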
3352
3353 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3354 It avoids exponential behavior in nonzero_bits1 when X has
3355 identical subexpressions on the first or the second level. */
3356
3357 static unsigned HOST_WIDE_INT
3358 cached_nonzero_bits (rtx x, enum machine_mode mode, rtx known_x,
3359 enum machine_mode known_mode,
3360 unsigned HOST_WIDE_INT known_ret)
3361 {
3362 if (x == known_x && mode == known_mode)
3363 return known_ret;
3364
3365 /* Try to find identical subexpressions. If found call
3366 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3367 precomputed value for the subexpression as KNOWN_RET. */
3368
3369 if (ARITHMETIC_P (x))
3370 {
3371 rtx x0 = XEXP (x, 0);
3372 rtx x1 = XEXP (x, 1);
3373
3374 /* Check the first level. */
3375 if (x0 == x1)
3376 return nonzero_bits1 (x, mode, x0, mode,
3377 cached_nonzero_bits (x0, mode, known_x,
3378 known_mode, known_ret));
3379
3380 /* Check the second level. */
3381 if (ARITHMETIC_P (x0)
3382 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3383 return nonzero_bits1 (x, mode, x1, mode,
3384 cached_nonzero_bits (x1, mode, known_x,
3385 known_mode, known_ret));
3386
3387 if (ARITHMETIC_P (x1)
3388 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3389 return nonzero_bits1 (x, mode, x0, mode,
3390 cached_nonzero_bits (x0, mode, known_x,
3391 known_mode, known_ret));
3392 }
3393
3394 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3395 }
3396
3397 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3398 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3399 is less useful. We can't allow both, because that results in exponential
3400 run time recursion. There is a nullstone testcase that triggered
3401 this. This macro avoids accidental uses of num_sign_bit_copies. */
3402 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3403
3404 /* Given an expression, X, compute which bits in X can be nonzero.
3405 We don't care about bits outside of those defined in MODE.
3406
3407 For most X this is simply GET_MODE_MASK (MODE), but if X is
3408 an arithmetic operation, we can do better. */
3409
3410 static unsigned HOST_WIDE_INT
3411 nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x,
3412 enum machine_mode known_mode,
3413 unsigned HOST_WIDE_INT known_ret)
3414 {
3415 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3416 unsigned HOST_WIDE_INT inner_nz;
3417 enum rtx_code code;
3418 unsigned int mode_width = GET_MODE_BITSIZE (mode);
3419
3420 /* For floating-point values, assume all bits are needed. */
3421 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
3422 return nonzero;
3423
3424 /* If X is wider than MODE, use its mode instead. */
3425 if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
3426 {
3427 mode = GET_MODE (x);
3428 nonzero = GET_MODE_MASK (mode);
3429 mode_width = GET_MODE_BITSIZE (mode);
3430 }
3431
3432 if (mode_width > HOST_BITS_PER_WIDE_INT)
3433 /* Our only callers in this case look for single bit values. So
3434 just return the mode mask. Those tests will then be false. */
3435 return nonzero;
3436
3437 #ifndef WORD_REGISTER_OPERATIONS
3438 /* If MODE is wider than X, but both are a single word for both the host
3439 and target machines, we can compute this from which bits of the
3440 object might be nonzero in its own mode, taking into account the fact
3441 that on many CISC machines, accessing an object in a wider mode
3442 causes the high-order bits to become undefined. So they are
3443 not known to be zero. */
3444
3445 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3446 && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
3447 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3448 && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
3449 {
3450 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3451 known_x, known_mode, known_ret);
3452 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3453 return nonzero;
3454 }
3455 #endif
3456
3457 code = GET_CODE (x);
3458 switch (code)
3459 {
3460 case REG:
3461 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3462 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3463 all the bits above ptr_mode are known to be zero. */
3464 if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3465 && REG_POINTER (x))
3466 nonzero &= GET_MODE_MASK (ptr_mode);
3467 #endif
3468
3469 /* Include declared information about alignment of pointers. */
3470 /* ??? We don't properly preserve REG_POINTER changes across
3471 pointer-to-integer casts, so we can't trust it except for
3472 things that we know must be pointers. See execute/960116-1.c. */
3473 if ((x == stack_pointer_rtx
3474 || x == frame_pointer_rtx
3475 || x == arg_pointer_rtx)
3476 && REGNO_POINTER_ALIGN (REGNO (x)))
3477 {
3478 unsigned HOST_WIDE_INT alignment
3479 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3480
3481 #ifdef PUSH_ROUNDING
3482 /* If PUSH_ROUNDING is defined, it is possible for the
3483 stack to be momentarily aligned only to that amount,
3484 so we pick the least alignment. */
3485 if (x == stack_pointer_rtx && PUSH_ARGS)
3486 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
3487 alignment);
3488 #endif
3489
3490 nonzero &= ~(alignment - 1);
3491 }
3492
3493 {
3494 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
3495 rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
3496 known_mode, known_ret,
3497 &nonzero_for_hook);
3498
3499 if (new)
3500 nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
3501 known_mode, known_ret);
3502
3503 return nonzero_for_hook;
3504 }
3505
3506 case CONST_INT:
3507 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3508 /* If X is negative in MODE, sign-extend the value. */
3509 if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
3510 && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
3511 return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
3512 #endif
3513
3514 return INTVAL (x);
3515
3516 case MEM:
3517 #ifdef LOAD_EXTEND_OP
3518 /* In many, if not most, RISC machines, reading a byte from memory
3519 zeros the rest of the register. Noticing that fact saves a lot
3520 of extra zero-extends. */
3521 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
3522 nonzero &= GET_MODE_MASK (GET_MODE (x));
3523 #endif
3524 break;
3525
3526 case EQ: case NE:
3527 case UNEQ: case LTGT:
3528 case GT: case GTU: case UNGT:
3529 case LT: case LTU: case UNLT:
3530 case GE: case GEU: case UNGE:
3531 case LE: case LEU: case UNLE:
3532 case UNORDERED: case ORDERED:
3533 /* If this produces an integer result, we know which bits are set.
3534 Code here used to clear bits outside the mode of X, but that is
3535 now done above. */
3536 /* Mind that MODE is the mode the caller wants to look at this
3537 operation in, and not the actual operation mode. We can wind
3538 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3539 that describes the results of a vector compare. */
3540 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3541 && mode_width <= HOST_BITS_PER_WIDE_INT)
3542 nonzero = STORE_FLAG_VALUE;
3543 break;
3544
3545 case NEG:
3546 #if 0
3547 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3548 and num_sign_bit_copies. */
3549 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3550 == GET_MODE_BITSIZE (GET_MODE (x)))
3551 nonzero = 1;
3552 #endif
3553
3554 if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
3555 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
3556 break;
3557
3558 case ABS:
3559 #if 0
3560 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3561 and num_sign_bit_copies. */
3562 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3563 == GET_MODE_BITSIZE (GET_MODE (x)))
3564 nonzero = 1;
3565 #endif
3566 break;
3567
3568 case TRUNCATE:
3569 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
3570 known_x, known_mode, known_ret)
3571 & GET_MODE_MASK (mode));
3572 break;
3573
3574 case ZERO_EXTEND:
3575 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3576 known_x, known_mode, known_ret);
3577 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3578 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3579 break;
3580
3581 case SIGN_EXTEND:
3582 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3583 Otherwise, show that all the bits in the outer mode but not in the
3584 inner mode may be nonzero. */
3585 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
3586 known_x, known_mode, known_ret);
3587 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3588 {
3589 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3590 if (inner_nz
3591 & (((HOST_WIDE_INT) 1
3592 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
3593 inner_nz |= (GET_MODE_MASK (mode)
3594 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
3595 }
3596
3597 nonzero &= inner_nz;
3598 break;
3599
3600 case AND:
3601 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3602 known_x, known_mode, known_ret)
3603 & cached_nonzero_bits (XEXP (x, 1), mode,
3604 known_x, known_mode, known_ret);
3605 break;
3606
3607 case XOR: case IOR:
3608 case UMIN: case UMAX: case SMIN: case SMAX:
3609 {
3610 unsigned HOST_WIDE_INT nonzero0 =
3611 cached_nonzero_bits (XEXP (x, 0), mode,
3612 known_x, known_mode, known_ret);
3613
3614 /* Don't call nonzero_bits for the second time if it cannot change
3615 anything. */
3616 if ((nonzero & nonzero0) != nonzero)
3617 nonzero &= nonzero0
3618 | cached_nonzero_bits (XEXP (x, 1), mode,
3619 known_x, known_mode, known_ret);
3620 }
3621 break;
3622
3623 case PLUS: case MINUS:
3624 case MULT:
3625 case DIV: case UDIV:
3626 case MOD: case UMOD:
3627 /* We can apply the rules of arithmetic to compute the number of
3628 high- and low-order zero bits of these operations. We start by
3629 computing the width (position of the highest-order nonzero bit)
3630 and the number of low-order zero bits for each value. */
3631 {
3632 unsigned HOST_WIDE_INT nz0 =
3633 cached_nonzero_bits (XEXP (x, 0), mode,
3634 known_x, known_mode, known_ret);
3635 unsigned HOST_WIDE_INT nz1 =
3636 cached_nonzero_bits (XEXP (x, 1), mode,
3637 known_x, known_mode, known_ret);
3638 int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
3639 int width0 = floor_log2 (nz0) + 1;
3640 int width1 = floor_log2 (nz1) + 1;
3641 int low0 = floor_log2 (nz0 & -nz0);
3642 int low1 = floor_log2 (nz1 & -nz1);
3643 HOST_WIDE_INT op0_maybe_minusp
3644 = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
3645 HOST_WIDE_INT op1_maybe_minusp
3646 = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
3647 unsigned int result_width = mode_width;
3648 int result_low = 0;
3649
3650 switch (code)
3651 {
3652 case PLUS:
3653 result_width = MAX (width0, width1) + 1;
3654 result_low = MIN (low0, low1);
3655 break;
3656 case MINUS:
3657 result_low = MIN (low0, low1);
3658 break;
3659 case MULT:
3660 result_width = width0 + width1;
3661 result_low = low0 + low1;
3662 break;
3663 case DIV:
3664 if (width1 == 0)
3665 break;
3666 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3667 result_width = width0;
3668 break;
3669 case UDIV:
3670 if (width1 == 0)
3671 break;
3672 result_width = width0;
3673 break;
3674 case MOD:
3675 if (width1 == 0)
3676 break;
3677 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3678 result_width = MIN (width0, width1);
3679 result_low = MIN (low0, low1);
3680 break;
3681 case UMOD:
3682 if (width1 == 0)
3683 break;
3684 result_width = MIN (width0, width1);
3685 result_low = MIN (low0, low1);
3686 break;
3687 default:
3688 gcc_unreachable ();
3689 }
3690
3691 if (result_width < mode_width)
3692 nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
3693
3694 if (result_low > 0)
3695 nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
3696
3697 #ifdef POINTERS_EXTEND_UNSIGNED
3698 /* If pointers extend unsigned and this is an addition or subtraction
3699 to a pointer in Pmode, all the bits above ptr_mode are known to be
3700 zero. */
3701 if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
3702 && (code == PLUS || code == MINUS)
3703 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
3704 nonzero &= GET_MODE_MASK (ptr_mode);
3705 #endif
3706 }
3707 break;
3708
3709 case ZERO_EXTRACT:
3710 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3711 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3712 nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
3713 break;
3714
3715 case SUBREG:
3716 /* If this is a SUBREG formed for a promoted variable that has
3717 been zero-extended, we know that at least the high-order bits
3718 are zero, though others might be too. */
3719
3720 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
3721 nonzero = GET_MODE_MASK (GET_MODE (x))
3722 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
3723 known_x, known_mode, known_ret);
3724
3725 /* If the inner mode is a single word for both the host and target
3726 machines, we can compute this from which bits of the inner
3727 object might be nonzero. */
3728 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
3729 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
3730 <= HOST_BITS_PER_WIDE_INT))
3731 {
3732 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
3733 known_x, known_mode, known_ret);
3734
3735 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
3736 /* If this is a typical RISC machine, we only have to worry
3737 about the way loads are extended. */
3738 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
3739 ? (((nonzero
3740 & (((unsigned HOST_WIDE_INT) 1
3741 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
3742 != 0))
3743 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
3744 || !MEM_P (SUBREG_REG (x)))
3745 #endif
3746 {
3747 /* On many CISC machines, accessing an object in a wider mode
3748 causes the high-order bits to become undefined. So they are
3749 not known to be zero. */
3750 if (GET_MODE_SIZE (GET_MODE (x))
3751 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
3752 nonzero |= (GET_MODE_MASK (GET_MODE (x))
3753 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
3754 }
3755 }
3756 break;
3757
3758 case ASHIFTRT:
3759 case LSHIFTRT:
3760 case ASHIFT:
3761 case ROTATE:
3762 /* The nonzero bits are in two classes: any bits within MODE
3763 that aren't in GET_MODE (x) are always significant. The rest of the
3764 nonzero bits are those that are significant in the operand of
3765 the shift when shifted the appropriate number of bits. This
3766 shows that high-order bits are cleared by the right shift and
3767 low-order bits by left shifts. */
3768 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3769 && INTVAL (XEXP (x, 1)) >= 0
3770 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3771 {
3772 enum machine_mode inner_mode = GET_MODE (x);
3773 unsigned int width = GET_MODE_BITSIZE (inner_mode);
3774 int count = INTVAL (XEXP (x, 1));
3775 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
3776 unsigned HOST_WIDE_INT op_nonzero =
3777 cached_nonzero_bits (XEXP (x, 0), mode,
3778 known_x, known_mode, known_ret);
3779 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
3780 unsigned HOST_WIDE_INT outer = 0;
3781
3782 if (mode_width > width)
3783 outer = (op_nonzero & nonzero & ~mode_mask);
3784
3785 if (code == LSHIFTRT)
3786 inner >>= count;
3787 else if (code == ASHIFTRT)
3788 {
3789 inner >>= count;
3790
3791 /* If the sign bit may have been nonzero before the shift, we
3792 need to mark all the places it could have been copied to
3793 by the shift as possibly nonzero. */
3794 if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
3795 inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
3796 }
3797 else if (code == ASHIFT)
3798 inner <<= count;
3799 else
3800 inner = ((inner << (count % width)
3801 | (inner >> (width - (count % width)))) & mode_mask);
3802
3803 nonzero &= (outer | inner);
3804 }
3805 break;
3806
3807 case FFS:
3808 case POPCOUNT:
3809 /* This is at most the number of bits in the mode. */
3810 nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
3811 break;
3812
3813 case CLZ:
3814 /* If CLZ has a known value at zero, then the nonzero bits are
3815 that value, plus the number of bits in the mode minus one. */
3816 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3817 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3818 else
3819 nonzero = -1;
3820 break;
3821
3822 case CTZ:
3823 /* If CTZ has a known value at zero, then the nonzero bits are
3824 that value, plus the number of bits in the mode minus one. */
3825 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3826 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3827 else
3828 nonzero = -1;
3829 break;
3830
3831 case PARITY:
3832 nonzero = 1;
3833 break;
3834
3835 case IF_THEN_ELSE:
3836 {
3837 unsigned HOST_WIDE_INT nonzero_true =
3838 cached_nonzero_bits (XEXP (x, 1), mode,
3839 known_x, known_mode, known_ret);
3840
3841 /* Don't call nonzero_bits for the second time if it cannot change
3842 anything. */
3843 if ((nonzero & nonzero_true) != nonzero)
3844 nonzero &= nonzero_true
3845 | cached_nonzero_bits (XEXP (x, 2), mode,
3846 known_x, known_mode, known_ret);
3847 }
3848 break;
3849
3850 default:
3851 break;
3852 }
3853
3854 return nonzero;
3855 }
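/* Worked examples, not part of the original file, using the rules above
   on a 32-bit target with hypothetical pseudo 100:

     nonzero_bits ((and:SI (reg:SI 100) (const_int 255)), SImode) == 0xff
     nonzero_bits ((ashift:SI (reg:SI 100) (const_int 4)), SImode)
       == 0xfffffff0

   i.e. masking bounds the result from above and a left shift clears the
   low-order bits.  */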
3856
3857 /* See the macro definition above. */
3858 #undef cached_num_sign_bit_copies
3859
3860 \f
3861 /* The function cached_num_sign_bit_copies is a wrapper around
3862 num_sign_bit_copies1. It avoids exponential behavior in
3863 num_sign_bit_copies1 when X has identical subexpressions on the
3864 first or the second level. */
3865
3866 static unsigned int
3867 cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x,
3868 enum machine_mode known_mode,
3869 unsigned int known_ret)
3870 {
3871 if (x == known_x && mode == known_mode)
3872 return known_ret;
3873
3874 /* Try to find identical subexpressions. If found call
3875 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
3876 the precomputed value for the subexpression as KNOWN_RET. */
3877
3878 if (ARITHMETIC_P (x))
3879 {
3880 rtx x0 = XEXP (x, 0);
3881 rtx x1 = XEXP (x, 1);
3882
3883 /* Check the first level. */
3884 if (x0 == x1)
3885 return
3886 num_sign_bit_copies1 (x, mode, x0, mode,
3887 cached_num_sign_bit_copies (x0, mode, known_x,
3888 known_mode,
3889 known_ret));
3890
3891 /* Check the second level. */
3892 if (ARITHMETIC_P (x0)
3893 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3894 return
3895 num_sign_bit_copies1 (x, mode, x1, mode,
3896 cached_num_sign_bit_copies (x1, mode, known_x,
3897 known_mode,
3898 known_ret));
3899
3900 if (ARITHMETIC_P (x1)
3901 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3902 return
3903 num_sign_bit_copies1 (x, mode, x0, mode,
3904 cached_num_sign_bit_copies (x0, mode, known_x,
3905 known_mode,
3906 known_ret));
3907 }
3908
3909 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
3910 }
3911
3912 /* Return the number of bits at the high-order end of X that are known to
3913 be equal to the sign bit. X will be used in mode MODE; if MODE is
3914 VOIDmode, X will be used in its own mode. The returned value will always
3915 be between 1 and the number of bits in MODE. */
3916
3917 static unsigned int
3918 num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x,
3919 enum machine_mode known_mode,
3920 unsigned int known_ret)
3921 {
3922 enum rtx_code code = GET_CODE (x);
3923 unsigned int bitwidth = GET_MODE_BITSIZE (mode);
3924 int num0, num1, result;
3925 unsigned HOST_WIDE_INT nonzero;
3926
3927 /* If we weren't given a mode, use the mode of X. If the mode is still
3928 VOIDmode, we don't know anything. Likewise if one of the modes is
3929 floating-point. */
3930
3931 if (mode == VOIDmode)
3932 mode = GET_MODE (x);
3933
3934 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
3935 return 1;
3936
3937 /* For a smaller object, just ignore the high bits. */
3938 if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
3939 {
3940 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
3941 known_x, known_mode, known_ret);
3942 return MAX (1,
3943 num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
3944 }
3945
3946 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
3947 {
3948 #ifndef WORD_REGISTER_OPERATIONS
3949 /* If this machine does not do all register operations on the entire
3950 register and MODE is wider than the mode of X, we can say nothing
3951 at all about the high-order bits. */
3952 return 1;
3953 #else
3954 /* Likewise on machines that do, if the mode of the object is smaller
3955 than a word and loads of that size don't sign extend, we can say
3956 nothing about the high order bits. */
3957 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
3958 #ifdef LOAD_EXTEND_OP
3959 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
3960 #endif
3961 )
3962 return 1;
3963 #endif
3964 }
3965
3966 switch (code)
3967 {
3968 case REG:
3969
3970 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3971 /* If pointers extend signed and this is a pointer in Pmode, say that
3972 all the bits above ptr_mode are known to be sign bit copies. */
3973 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
3974 && REG_POINTER (x))
3975 return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
3976 #endif
3977
3978 {
3979 unsigned int copies_for_hook = 1, copies = 1;
3980 rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
3981 known_mode, known_ret,
3982 &copies_for_hook);
3983
3984 if (new)
3985 copies = cached_num_sign_bit_copies (new, mode, known_x,
3986 known_mode, known_ret);
3987
3988 if (copies > 1 || copies_for_hook > 1)
3989 return MAX (copies, copies_for_hook);
3990
3991 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
3992 }
3993 break;
3994
3995 case MEM:
3996 #ifdef LOAD_EXTEND_OP
3997 /* Some RISC machines sign-extend all loads of smaller than a word. */
3998 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
3999 return MAX (1, ((int) bitwidth
4000 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
4001 #endif
4002 break;
4003
4004 case CONST_INT:
4005 /* If the constant is negative, take its 1's complement and remask.
4006 Then see how many zero bits we have. */
4007 nonzero = INTVAL (x) & GET_MODE_MASK (mode);
4008 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4009 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4010 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4011
4012 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4013
4014 case SUBREG:
4015 /* If this is a SUBREG for a promoted object that is sign-extended
4016 and we are looking at it in a wider mode, then at least the
4017 high-order bits are known to be sign bit copies. */
4018
4019 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4020 {
4021 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4022 known_x, known_mode, known_ret);
4023 return MAX ((int) bitwidth
4024 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
4025 num0);
4026 }
4027
4028 /* For a smaller object, just ignore the high bits. */
4029 if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
4030 {
4031 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4032 known_x, known_mode, known_ret);
4033 return MAX (1, (num0
4034 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
4035 - bitwidth)));
4036 }
4037
4038 #ifdef WORD_REGISTER_OPERATIONS
4039 #ifdef LOAD_EXTEND_OP
4040 /* For paradoxical SUBREGs on machines where all register operations
4041 affect the entire register, just look inside. Note that we are
4042 passing MODE to the recursive call, so the number of sign bit copies
4043 will remain relative to that mode, not the inner mode. */
4044
4045 /* This works only if loads sign extend. Otherwise, if we get a
4046 reload for the inner part, it may be loaded from the stack, and
4047 then we lose all sign bit copies that existed before the store
4048 to the stack. */
4049
4050 if ((GET_MODE_SIZE (GET_MODE (x))
4051 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4052 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4053 && MEM_P (SUBREG_REG (x)))
4054 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4055 known_x, known_mode, known_ret);
4056 #endif
4057 #endif
4058 break;
4059
4060 case SIGN_EXTRACT:
4061 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4062 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4063 break;
4064
4065 case SIGN_EXTEND:
4066 return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4067 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4068 known_x, known_mode, known_ret));
4069
4070 case TRUNCATE:
4071 /* For a smaller object, just ignore the high bits. */
4072 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4073 known_x, known_mode, known_ret);
4074 return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4075 - bitwidth)));
4076
4077 case NOT:
4078 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4079 known_x, known_mode, known_ret);
4080
4081 case ROTATE: case ROTATERT:
4082 /* If we are rotating left by a number of bits less than the number
4083 of sign bit copies, we can just subtract that amount from the
4084 number. */
4085 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4086 && INTVAL (XEXP (x, 1)) >= 0
4087 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4088 {
4089 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4090 known_x, known_mode, known_ret);
4091 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4092 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4093 }
4094 break;
4095
4096 case NEG:
4097 /* In general, this subtracts one sign bit copy. But if the value
4098 is known to be positive, the number of sign bit copies is the
4099 same as that of the input. Finally, if the input has just one bit
4100 that might be nonzero, all the bits are copies of the sign bit. */
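/* E.g. (a simple illustration in 32-bit SImode) if X can only be 0 or 1,
   then (neg X) is 0 or -1, so every one of the 32 bits is a copy of the
   sign bit.  */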
4101 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4102 known_x, known_mode, known_ret);
4103 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4104 return num0 > 1 ? num0 - 1 : 1;
4105
4106 nonzero = nonzero_bits (XEXP (x, 0), mode);
4107 if (nonzero == 1)
4108 return bitwidth;
4109
4110 if (num0 > 1
4111 && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4112 num0--;
4113
4114 return num0;
4115
4116 case IOR: case AND: case XOR:
4117 case SMIN: case SMAX: case UMIN: case UMAX:
4118 /* Logical operations will preserve the number of sign-bit copies.
4119 MIN and MAX operations always return one of the operands. */
4120 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4121 known_x, known_mode, known_ret);
4122 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4123 known_x, known_mode, known_ret);
4124 return MIN (num0, num1);
4125
4126 case PLUS: case MINUS:
4127 /* For addition and subtraction, we can have a 1-bit carry. However,
4128 if we are subtracting 1 from a positive number, there will not
4129 be such a carry. Furthermore, if the positive number is known to
4130 be 0 or 1, we know the result is either -1 or 0. */
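/* For illustration (assuming 32-bit SImode): if the first operand is
   known to be 0 or 1, then (plus X (const_int -1)) is -1 or 0, so all
   32 bits are copies of the sign bit, rather than the more conservative
   MIN (num0, num1) - 1 that the generic path below would report.  */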
4131
4132 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4133 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4134 {
4135 nonzero = nonzero_bits (XEXP (x, 0), mode);
4136 if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4137 return (nonzero == 1 || nonzero == 0 ? bitwidth
4138 : bitwidth - floor_log2 (nonzero) - 1);
4139 }
4140
4141 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4142 known_x, known_mode, known_ret);
4143 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4144 known_x, known_mode, known_ret);
4145 result = MAX (1, MIN (num0, num1) - 1);
4146
4147 #ifdef POINTERS_EXTEND_UNSIGNED
4148 /* If pointers extend signed and this is an addition or subtraction
4149 to a pointer in Pmode, all the bits above ptr_mode are known to be
4150 sign bit copies. */
4151 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4152 && (code == PLUS || code == MINUS)
4153 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
4154 result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
4155 - GET_MODE_BITSIZE (ptr_mode) + 1),
4156 result);
4157 #endif
4158 return result;
4159
4160 case MULT:
4161 /* The number of bits of the product is the sum of the number of
4162 bits of both terms. However, unless one of the terms is known
4163 to be positive, we must allow for an additional bit since negating
4164 a negative number can remove one sign bit copy. */
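/* A rough worked example in 32-bit SImode: if each operand has 20 sign
   bit copies, each fits in 13 signed bits, so the product fits in 26
   signed bits; the computation below gives 20 + 20 - 32 = 8 copies,
   less one for the possible extra bit, i.e. at least 7.  */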
4165
4166 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4167 known_x, known_mode, known_ret);
4168 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4169 known_x, known_mode, known_ret);
4170
4171 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4172 if (result > 0
4173 && (bitwidth > HOST_BITS_PER_WIDE_INT
4174 || (((nonzero_bits (XEXP (x, 0), mode)
4175 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4176 && ((nonzero_bits (XEXP (x, 1), mode)
4177 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
4178 result--;
4179
4180 return MAX (1, result);
4181
4182 case UDIV:
4183 /* The result must be <= the first operand. If the first operand
4184 has the high bit set, we know nothing about the number of sign
4185 bit copies. */
4186 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4187 return 1;
4188 else if ((nonzero_bits (XEXP (x, 0), mode)
4189 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4190 return 1;
4191 else
4192 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4193 known_x, known_mode, known_ret);
4194
4195 case UMOD:
4196 /* The result must be <= the second operand. */
4197 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4198 known_x, known_mode, known_ret);
4199
4200 case DIV:
4201 /* Similar to unsigned division, except that we have to worry about
4202 the case where the divisor is negative, in which case the result
4203 can gain a bit and we must allow for one fewer sign bit copy. */
4204 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4205 known_x, known_mode, known_ret);
4206 if (result > 1
4207 && (bitwidth > HOST_BITS_PER_WIDE_INT
4208 || (nonzero_bits (XEXP (x, 1), mode)
4209 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4210 result--;
4211
4212 return result;
4213
4214 case MOD:
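/* As with DIV above, but the result is bounded by the second operand
   rather than the first.  */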
4215 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4216 known_x, known_mode, known_ret);
4217 if (result > 1
4218 && (bitwidth > HOST_BITS_PER_WIDE_INT
4219 || (nonzero_bits (XEXP (x, 1), mode)
4220 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4221 result--;
4222
4223 return result;
4224
4225 case ASHIFTRT:
4226 /* Right shifts by a constant add that many bits to the number of
4227 bits equal to the sign bit. */
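/* E.g. (ashiftrt:SI X (const_int 8)) has at least 8 more sign bit
   copies than X, capped at the 32-bit width.  */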
4228 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4229 known_x, known_mode, known_ret);
4230 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4231 && INTVAL (XEXP (x, 1)) > 0)
4232 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4233
4234 return num0;
4235
4236 case ASHIFT:
4237 /* Left shifts destroy copies. */
4238 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4239 || INTVAL (XEXP (x, 1)) < 0
4240 || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
4241 return 1;
4242
4243 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4244 known_x, known_mode, known_ret);
4245 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4246
4247 case IF_THEN_ELSE:
4248 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4249 known_x, known_mode, known_ret);
4250 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4251 known_x, known_mode, known_ret);
4252 return MIN (num0, num1);
4253
4254 case EQ: case NE: case GE: case GT: case LE: case LT:
4255 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4256 case GEU: case GTU: case LEU: case LTU:
4257 case UNORDERED: case ORDERED:
4258 /* The value here is STORE_FLAG_VALUE or zero.  If STORE_FLAG_VALUE
4259 is negative, take its 1's complement and remask.  Then see how many zero bits we have. */
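/* For instance, on a target where STORE_FLAG_VALUE is 1, a 32-bit
   comparison result is 0 or 1, so floor_log2 (1) == 0 and we report
   32 - 0 - 1 = 31 sign bit copies.  */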
4260 nonzero = STORE_FLAG_VALUE;
4261 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4262 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4263 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4264
4265 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4266
4267 default:
4268 break;
4269 }
4270
4271 /* If we haven't been able to figure it out by one of the above rules,
4272 see if some of the high-order bits are known to be zero. If so,
4273 count those bits and return one less than that amount. If we can't
4274 safely compute the mask for this mode, always return BITWIDTH. */
4275
4276 bitwidth = GET_MODE_BITSIZE (mode);
4277 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4278 return 1;
4279
4280 nonzero = nonzero_bits (x, mode);
4281 return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
4282 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4283 }
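/* A minimal usage sketch (the register number 100 below is just a
   hypothetical pseudo):

     num_sign_bit_copies (gen_rtx_SIGN_EXTEND (SImode,
                                                gen_rtx_REG (QImode, 100)),
                          SImode)

   is at least 32 - 8 + 1 = 25, because the SIGN_EXTEND case adds the
   widening amount to whatever is known about the QImode operand.  */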
4284
4285 /* Calculate the rtx_cost of a single instruction. A return value of
4286 zero indicates an instruction pattern without a known cost. */
4287
4288 int
4289 insn_rtx_cost (rtx pat)
4290 {
4291 int i, cost;
4292 rtx set;
4293
4294 /* Extract the single set rtx from the instruction pattern.
4295 We can't use single_set since we only have the pattern. */
4296 if (GET_CODE (pat) == SET)
4297 set = pat;
4298 else if (GET_CODE (pat) == PARALLEL)
4299 {
4300 set = NULL_RTX;
4301 for (i = 0; i < XVECLEN (pat, 0); i++)
4302 {
4303 rtx x = XVECEXP (pat, 0, i);
4304 if (GET_CODE (x) == SET)
4305 {
4306 if (set)
4307 return 0;
4308 set = x;
4309 }
4310 }
4311 if (!set)
4312 return 0;
4313 }
4314 else
4315 return 0;
4316
4317 cost = rtx_cost (SET_SRC (set), SET);
4318 return cost > 0 ? cost : COSTS_N_INSNS (1);
4319 }
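/* A typical use, sketched (INSN stands for some insn of interest):

     int cost = insn_rtx_cost (PATTERN (insn));

   A zero return means the pattern is not a single SET (or a PARALLEL
   containing exactly one SET); otherwise the value is at least
   COSTS_N_INSNS (1).  */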
4320
4321 /* Given an insn INSN and condition COND, return the condition in a
4322 canonical form to simplify testing by callers. Specifically:
4323
4324 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4325 (2) Both operands will be machine operands; (cc0) will have been replaced.
4326 (3) If an operand is a constant, it will be the second operand.
4327 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4328 for GE, GEU, and LEU.
4329
4330 If the condition cannot be understood, or is an inequality floating-point
4331 comparison which needs to be reversed, 0 will be returned.
4332
4333 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4334
4335 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4336 insn used in locating the condition was found. If a replacement test
4337 of the condition is desired, it should be placed in front of that
4338 insn and we will be sure that the inputs are still valid.
4339
4340 If WANT_REG is nonzero, we wish the condition to be relative to that
4341 register, if possible. Therefore, do not canonicalize the condition
4342 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4343 to be a compare to a CC mode register.
4344
4345 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4346 and at INSN. */
4347
4348 rtx
4349 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4350 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4351 {
4352 enum rtx_code code;
4353 rtx prev = insn;
4354 rtx set;
4355 rtx tem;
4356 rtx op0, op1;
4357 int reverse_code = 0;
4358 enum machine_mode mode;
4359 basic_block bb = BLOCK_FOR_INSN (insn);
4360
4361 code = GET_CODE (cond);
4362 mode = GET_MODE (cond);
4363 op0 = XEXP (cond, 0);
4364 op1 = XEXP (cond, 1);
4365
4366 if (reverse)
4367 code = reversed_comparison_code (cond, insn);
4368 if (code == UNKNOWN)
4369 return 0;
4370
4371 if (earliest)
4372 *earliest = insn;
4373
4374 /* If we are comparing a register with zero, see if the register is set
4375 in the previous insn to a COMPARE or a comparison operation. Perform
4376 the same tests, as a function of STORE_FLAG_VALUE, as
4377 find_comparison_args in cse.c does.  */
4378
4379 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4380 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4381 && op1 == CONST0_RTX (GET_MODE (op0))
4382 && op0 != want_reg)
4383 {
4384 /* Set nonzero when we find something of interest. */
4385 rtx x = 0;
4386
4387 #ifdef HAVE_cc0
4388 /* If comparison with cc0, import actual comparison from compare
4389 insn. */
4390 if (op0 == cc0_rtx)
4391 {
4392 if ((prev = prev_nonnote_insn (prev)) == 0
4393 || !NONJUMP_INSN_P (prev)
4394 || (set = single_set (prev)) == 0
4395 || SET_DEST (set) != cc0_rtx)
4396 return 0;
4397
4398 op0 = SET_SRC (set);
4399 op1 = CONST0_RTX (GET_MODE (op0));
4400 if (earliest)
4401 *earliest = prev;
4402 }
4403 #endif
4404
4405 /* If this is a COMPARE, pick up the two things being compared. */
4406 if (GET_CODE (op0) == COMPARE)
4407 {
4408 op1 = XEXP (op0, 1);
4409 op0 = XEXP (op0, 0);
4410 continue;
4411 }
4412 else if (!REG_P (op0))
4413 break;
4414
4415 /* Go back to the previous insn. Stop if it is not an INSN. We also
4416 stop if it isn't a single set or if it has a REG_INC note because
4417 we don't want to bother dealing with it. */
4418
4419 if ((prev = prev_nonnote_insn (prev)) == 0
4420 || !NONJUMP_INSN_P (prev)
4421 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4422 /* In cfglayout mode, there do not have to be labels at the
4423 beginning of a block, or jumps at the end, so the previous
4424 conditions would not stop us when we reach the basic block boundary. */
4425 || BLOCK_FOR_INSN (prev) != bb)
4426 break;
4427
4428 set = set_of (op0, prev);
4429
4430 if (set
4431 && (GET_CODE (set) != SET
4432 || !rtx_equal_p (SET_DEST (set), op0)))
4433 break;
4434
4435 /* If this is setting OP0, get what it sets it to if it looks
4436 relevant. */
4437 if (set)
4438 {
4439 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4440 #ifdef FLOAT_STORE_FLAG_VALUE
4441 REAL_VALUE_TYPE fsfv;
4442 #endif
4443
4444 /* ??? We may not combine comparisons done in a CCmode with
4445 comparisons not done in a CCmode. This is to aid targets
4446 like Alpha that have an IEEE compliant EQ instruction, and
4447 a non-IEEE compliant BEQ instruction. The use of CCmode is
4448 actually artificial, simply to prevent the combination, but
4449 should not affect other platforms.
4450
4451 However, we must allow VOIDmode comparisons to match either
4452 CCmode or non-CCmode comparison, because some ports have
4453 modeless comparisons inside branch patterns.
4454
4455 ??? This mode check should perhaps look more like the mode check
4456 in simplify_comparison in combine. */
4457
4458 if ((GET_CODE (SET_SRC (set)) == COMPARE
4459 || (((code == NE
4460 || (code == LT
4461 && GET_MODE_CLASS (inner_mode) == MODE_INT
4462 && (GET_MODE_BITSIZE (inner_mode)
4463 <= HOST_BITS_PER_WIDE_INT)
4464 && (STORE_FLAG_VALUE
4465 & ((HOST_WIDE_INT) 1
4466 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4467 #ifdef FLOAT_STORE_FLAG_VALUE
4468 || (code == LT
4469 && SCALAR_FLOAT_MODE_P (inner_mode)
4470 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4471 REAL_VALUE_NEGATIVE (fsfv)))
4472 #endif
4473 ))
4474 && COMPARISON_P (SET_SRC (set))))
4475 && (((GET_MODE_CLASS (mode) == MODE_CC)
4476 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4477 || mode == VOIDmode || inner_mode == VOIDmode))
4478 x = SET_SRC (set);
4479 else if (((code == EQ
4480 || (code == GE
4481 && (GET_MODE_BITSIZE (inner_mode)
4482 <= HOST_BITS_PER_WIDE_INT)
4483 && GET_MODE_CLASS (inner_mode) == MODE_INT
4484 && (STORE_FLAG_VALUE
4485 & ((HOST_WIDE_INT) 1
4486 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4487 #ifdef FLOAT_STORE_FLAG_VALUE
4488 || (code == GE
4489 && SCALAR_FLOAT_MODE_P (inner_mode)
4490 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4491 REAL_VALUE_NEGATIVE (fsfv)))
4492 #endif
4493 ))
4494 && COMPARISON_P (SET_SRC (set))
4495 && (((GET_MODE_CLASS (mode) == MODE_CC)
4496 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4497 || mode == VOIDmode || inner_mode == VOIDmode))
4499 {
4500 reverse_code = 1;
4501 x = SET_SRC (set);
4502 }
4503 else
4504 break;
4505 }
4506
4507 else if (reg_set_p (op0, prev))
4508 /* If this sets OP0, but not directly, we have to give up. */
4509 break;
4510
4511 if (x)
4512 {
4513 /* If the caller is expecting the condition to be valid at INSN,
4514 make sure X doesn't change before INSN. */
4515 if (valid_at_insn_p)
4516 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
4517 break;
4518 if (COMPARISON_P (x))
4519 code = GET_CODE (x);
4520 if (reverse_code)
4521 {
4522 code = reversed_comparison_code (x, prev);
4523 if (code == UNKNOWN)
4524 return 0;
4525 reverse_code = 0;
4526 }
4527
4528 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
4529 if (earliest)
4530 *earliest = prev;
4531 }
4532 }
4533
4534 /* If the constant is first, put it last. */
4535 if (CONSTANT_P (op0))
4536 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
4537
4538 /* If OP0 is the result of a comparison, we weren't able to find what
4539 was really being compared, so fail. */
4540 if (!allow_cc_mode
4541 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4542 return 0;
4543
4544 /* Canonicalize any ordered comparison with integers involving equality
4545 if we can do computations in the relevant mode and we do not
4546 overflow. */
4547
4548 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
4549 && GET_CODE (op1) == CONST_INT
4550 && GET_MODE (op0) != VOIDmode
4551 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
4552 {
4553 HOST_WIDE_INT const_val = INTVAL (op1);
4554 unsigned HOST_WIDE_INT uconst_val = const_val;
4555 unsigned HOST_WIDE_INT max_val
4556 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
4557
4558 switch (code)
4559 {
4560 case LE:
4561 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
4562 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
4563 break;
4564
4565 /* When cross-compiling, const_val might be sign-extended from
4566 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
4567 case GE:
4568 if ((HOST_WIDE_INT) (const_val & max_val)
4569 != (((HOST_WIDE_INT) 1
4570 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
4571 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
4572 break;
4573
4574 case LEU:
4575 if (uconst_val < max_val)
4576 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
4577 break;
4578
4579 case GEU:
4580 if (uconst_val != 0)
4581 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
4582 break;
4583
4584 default:
4585 break;
4586 }
4587 }
4588
4589 /* Never return CC0; return zero instead. */
4590 if (CC0_P (op0))
4591 return 0;
4592
4593 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4594 }
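/* For example, rule (4) above rewrites

     (le (reg:SI 100) (const_int 4))  into  (lt (reg:SI 100) (const_int 5))

   (register 100 being a hypothetical pseudo), provided the incremented
   constant still fits in SImode.  */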
4595
4596 /* Given a jump insn JUMP, return the condition that will cause it to branch
4597 to its JUMP_LABEL. If the condition cannot be understood, or is an
4598 inequality floating-point comparison which needs to be reversed, 0 will
4599 be returned.
4600
4601 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4602 insn used in locating the condition was found. If a replacement test
4603 of the condition is desired, it should be placed in front of that
4604 insn and we will be sure that the inputs are still valid. If EARLIEST
4605 is null, the returned condition will be valid at INSN.
4606
4607 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
4608 compare to a CC mode register.
4609
4610 VALID_AT_INSN_P is the same as for canonicalize_condition. */
4611
4612 rtx
4613 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
4614 {
4615 rtx cond;
4616 int reverse;
4617 rtx set;
4618
4619 /* If this is not a standard conditional jump, we can't parse it. */
4620 if (!JUMP_P (jump)
4621 || ! any_condjump_p (jump))
4622 return 0;
4623 set = pc_set (jump);
4624
4625 cond = XEXP (SET_SRC (set), 0);
4626
4627 /* If this branches to JUMP_LABEL when the condition is false, reverse
4628 the condition. */
4629 reverse
4630 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4631 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
4632
4633 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
4634 allow_cc_mode, valid_at_insn_p);
4635 }
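/* A minimal sketch of a caller (JUMP stands for a conditional jump insn
   located by the caller):

     rtx earliest;
     rtx cond = get_condition (jump, &earliest, 0, 1);

   COND is then either zero or a canonical comparison valid at both
   EARLIEST and JUMP.  */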
4636
4637 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
4638 TARGET_MODE_REP_EXTENDED.
4639
4640 Note that we assume that the property of
4641 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
4642 narrower than mode B. I.e., if A is a mode narrower than B then in
4643 order to be able to operate on it in mode B, mode A needs to
4644 satisfy the requirements set by the representation of mode B. */
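/* For instance, on a hypothetical target whose mode_rep_extended hook
   returns SIGN_EXTEND for (SImode, DImode), a MIPS64-like convention
   where SImode values live sign-extended in DImode registers, the
   initialization below records
   num_sign_bit_copies_in_rep[DImode][SImode] == 32.  */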
4645
4646 static void
4647 init_num_sign_bit_copies_in_rep (void)
4648 {
4649 enum machine_mode mode, in_mode;
4650
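/* Note that when the inner loop below terminates, MODE has reached
   IN_MODE, so stepping IN_MODE with GET_MODE_WIDER_MODE (mode) advances
   it to the next wider integral mode.  */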
4651 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
4652 in_mode = GET_MODE_WIDER_MODE (mode))
4653 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
4654 mode = GET_MODE_WIDER_MODE (mode))
4655 {
4656 enum machine_mode i;
4657
4658 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
4659 extends only to the next wider mode. */
4660 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
4661 || GET_MODE_WIDER_MODE (mode) == in_mode);
4662
4663 /* We are in in_mode. Count how many bits outside of mode
4664 have to be copies of the sign-bit. */
4665 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
4666 {
4667 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
4668
4669 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
4670 /* We can only check sign-bit copies starting from the
4671 top-bit. In order to be able to check the bits we
4672 have already seen we pretend that subsequent bits
4673 have to be sign-bit copies too. */
4674 || num_sign_bit_copies_in_rep [in_mode][mode])
4675 num_sign_bit_copies_in_rep [in_mode][mode]
4676 += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
4677 }
4678 }
4679 }
4680
4681 /* Suppose that truncation from the machine mode of X to MODE is not a
4682 no-op. See if there is anything special about X so that we can
4683 assume it already contains a truncated value of MODE. */
4684
4685 bool
4686 truncated_to_mode (enum machine_mode mode, rtx x)
4687 {
4688 /* This register has already been used in MODE without explicit
4689 truncation. */
4690 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
4691 return true;
4692
4693 /* See if we already satisfy the requirements of MODE. If yes we
4694 can just switch to MODE. */
4695 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
4696 && (num_sign_bit_copies (x, GET_MODE (x))
4697 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
4698 return true;
4699
4700 return false;
4701 }
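/* Continuing the DImode/SImode example above: with
   num_sign_bit_copies_in_rep[DImode][SImode] == 32, a DImode value is
   considered truncated to SImode here only if at least 33 of its bits
   are known to be copies of the sign bit.  */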
4702 \f
4703 /* Initialize non_rtx_starting_operands, which is used to speed up
4704 for_each_rtx. */
4705 void
4706 init_rtlanal (void)
4707 {
4708 int i;
4709 for (i = 0; i < NUM_RTX_CODE; i++)
4710 {
4711 const char *format = GET_RTX_FORMAT (i);
4712 const char *first = strpbrk (format, "eEV");
4713 non_rtx_starting_operands[i] = first ? first - format : -1;
4714 }
4715
4716 init_num_sign_bit_copies_in_rep ();
4717 }
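/* As filled in above, non_rtx_starting_operands[PLUS] ends up 0 (format
   "ee"), while non_rtx_starting_operands[CONST_INT] is -1 (format "w",
   which contains no rtx operands at all).  */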
4718 \f
4719 /* Check whether this is a constant pool constant. */
4720 bool
4721 constant_pool_constant_p (rtx x)
4722 {
4723 x = avoid_constant_pool_reference (x);
4724 return GET_CODE (x) == CONST_DOUBLE;
4725 }
4726