1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28 #include "rtl.h"
29 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "recog.h"
32 #include "target.h"
33 #include "output.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "real.h"
37 #include "regs.h"
38 #include "function.h"
39 #include "df.h"
40 #include "tree.h"
41
42 /* Forward declarations */
43 static void set_of_1 (rtx, const_rtx, void *);
44 static bool covers_regno_p (const_rtx, unsigned int);
45 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46 static int rtx_referenced_p_1 (rtx *, void *);
47 static int computed_jump_p_1 (const_rtx);
48 static void parms_set (rtx, const_rtx, void *);
49
50 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
51 const_rtx, enum machine_mode,
52 unsigned HOST_WIDE_INT);
53 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
54 const_rtx, enum machine_mode,
55 unsigned HOST_WIDE_INT);
56 static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
57 enum machine_mode,
58 unsigned int);
59 static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
60 enum machine_mode, unsigned int);
61
62 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
63 -1 if a code has no such operand. */
64 static int non_rtx_starting_operands[NUM_RTX_CODE];
65
66 /* Bit flags that specify the machine subtype we are compiling for.
67 Bits are tested using macros TARGET_... defined in the tm.h file
68 and set by `-m...' switches. Must be defined in rtlanal.c. */
69
70 int target_flags;
71
72 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
73 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
74 SIGN_EXTEND then while narrowing we also have to enforce the
75 representation and sign-extend the value to mode DESTINATION_REP.
76
77 If the value is already sign-extended to DESTINATION_REP mode we
78 can just switch to DESTINATION mode on it. For each pair of
79 integral modes SOURCE and DESTINATION, when truncating from SOURCE
80 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
81 contains the number of high-order bits in SOURCE that have to be
82 copies of the sign-bit so that we can do this mode-switch to
83 DESTINATION. */
84
85 static unsigned int
86 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
87 \f
88 /* Return 1 if the value of X is unstable
89 (would be different at a different point in the program).
90 The frame pointer, arg pointer, etc. are considered stable
91 (within one function) and so is anything marked `unchanging'. */
92
93 int
94 rtx_unstable_p (const_rtx x)
95 {
96 const RTX_CODE code = GET_CODE (x);
97 int i;
98 const char *fmt;
99
100 switch (code)
101 {
102 case MEM:
103 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
104
105 case CONST:
106 case CONST_INT:
107 case CONST_DOUBLE:
108 case CONST_FIXED:
109 case CONST_VECTOR:
110 case SYMBOL_REF:
111 case LABEL_REF:
112 return 0;
113
114 case REG:
115 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
116 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
117 /* The arg pointer varies if it is not a fixed register. */
118 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
119 return 0;
120 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
121 /* ??? When call-clobbered, the value is stable modulo the restore
122 that must happen after a call. This currently screws up local-alloc
123 into believing that the restore is not needed. */
124 if (x == pic_offset_table_rtx)
125 return 0;
126 #endif
127 return 1;
128
129 case ASM_OPERANDS:
130 if (MEM_VOLATILE_P (x))
131 return 1;
132
133 /* Fall through. */
134
135 default:
136 break;
137 }
138
139 fmt = GET_RTX_FORMAT (code);
140 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
141 if (fmt[i] == 'e')
142 {
143 if (rtx_unstable_p (XEXP (x, i)))
144 return 1;
145 }
146 else if (fmt[i] == 'E')
147 {
148 int j;
149 for (j = 0; j < XVECLEN (x, i); j++)
150 if (rtx_unstable_p (XVECEXP (x, i, j)))
151 return 1;
152 }
153
154 return 0;
155 }
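/* Illustrative examples (not from the original sources): a MEM such as
   (mem:SI (reg:SI 100)) is unstable unless MEM_READONLY_P is set, since
   the pseudo may hold different addresses at different points, while
   (plus:SI (reg:SI fp) (const_int 4)) is stable, because the frame
   pointer rtx is treated as constant within a function.  */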
156
157 /* Return 1 if X has a value that can vary even between two
158 executions of the program. 0 means X can be compared reliably
159 against certain constants or near-constants.
160 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
161 zero, we are slightly more conservative.
162 The frame pointer and the arg pointer are considered constant. */
163
164 bool
165 rtx_varies_p (const_rtx x, bool for_alias)
166 {
167 RTX_CODE code;
168 int i;
169 const char *fmt;
170
171 if (!x)
172 return 0;
173
174 code = GET_CODE (x);
175 switch (code)
176 {
177 case MEM:
178 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
179
180 case CONST:
181 case CONST_INT:
182 case CONST_DOUBLE:
183 case CONST_FIXED:
184 case CONST_VECTOR:
185 case SYMBOL_REF:
186 case LABEL_REF:
187 return 0;
188
189 case REG:
190 /* Note that we have to test for the actual rtx used for the frame
191 and arg pointers and not just the register number in case we have
192 eliminated the frame and/or arg pointer and are using it
193 for pseudos. */
194 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
195 /* The arg pointer varies if it is not a fixed register. */
196 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
197 return 0;
198 if (x == pic_offset_table_rtx
199 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
200 /* ??? When call-clobbered, the value is stable modulo the restore
201 that must happen after a call. This currently screws up
202 local-alloc into believing that the restore is not needed, so we
203 must return 0 only if we are called from alias analysis. */
204 && for_alias
205 #endif
206 )
207 return 0;
208 return 1;
209
210 case LO_SUM:
211 /* The operand 0 of a LO_SUM is considered constant
212 (in fact it is related specifically to operand 1)
213 during alias analysis. */
214 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
215 || rtx_varies_p (XEXP (x, 1), for_alias);
216
217 case ASM_OPERANDS:
218 if (MEM_VOLATILE_P (x))
219 return 1;
220
221 /* Fall through. */
222
223 default:
224 break;
225 }
226
227 fmt = GET_RTX_FORMAT (code);
228 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
229 if (fmt[i] == 'e')
230 {
231 if (rtx_varies_p (XEXP (x, i), for_alias))
232 return 1;
233 }
234 else if (fmt[i] == 'E')
235 {
236 int j;
237 for (j = 0; j < XVECLEN (x, i); j++)
238 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
239 return 1;
240 }
241
242 return 0;
243 }
244
245 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
246 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
247 whether nonzero is returned for unaligned memory accesses on strict
248 alignment machines. */
249
250 static int
251 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
252 enum machine_mode mode, bool unaligned_mems)
253 {
254 enum rtx_code code = GET_CODE (x);
255
256 if (STRICT_ALIGNMENT
257 && unaligned_mems
258 && GET_MODE_SIZE (mode) != 0)
259 {
260 HOST_WIDE_INT actual_offset = offset;
261 #ifdef SPARC_STACK_BOUNDARY_HACK
262 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
263 the real alignment of %sp. However, when it does this, the
264 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
265 if (SPARC_STACK_BOUNDARY_HACK
266 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
267 actual_offset -= STACK_POINTER_OFFSET;
268 #endif
269
270 if (actual_offset % GET_MODE_SIZE (mode) != 0)
271 return 1;
272 }
273
274 switch (code)
275 {
276 case SYMBOL_REF:
277 if (SYMBOL_REF_WEAK (x))
278 return 1;
279 if (!CONSTANT_POOL_ADDRESS_P (x))
280 {
281 tree decl;
282 HOST_WIDE_INT decl_size;
283
284 if (offset < 0)
285 return 1;
286 if (size == 0)
287 size = GET_MODE_SIZE (mode);
288 if (size == 0)
289 return offset != 0;
290
291 /* If the size of the access or of the symbol is unknown,
292 assume the worst. */
293 decl = SYMBOL_REF_DECL (x);
294
295 /* Else check that the access is in bounds. TODO: restructure
296 expr_size/lhd_expr_size/int_expr_size and just use the latter. */
297 if (!decl)
298 decl_size = -1;
299 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
300 decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
301 ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
302 : -1);
303 else if (TREE_CODE (decl) == STRING_CST)
304 decl_size = TREE_STRING_LENGTH (decl);
305 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
306 decl_size = int_size_in_bytes (TREE_TYPE (decl));
307 else
308 decl_size = -1;
309
310 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
311 }
312
313 return 0;
314
315 case LABEL_REF:
316 return 0;
317
318 case REG:
319 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
320 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
321 || x == stack_pointer_rtx
322 /* The arg pointer varies if it is not a fixed register. */
323 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
324 return 0;
325 /* All of the virtual frame registers are stack references. */
326 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
327 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
328 return 0;
329 return 1;
330
331 case CONST:
332 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
333 mode, unaligned_mems);
334
335 case PLUS:
336 /* An address is assumed not to trap if:
337 - it is the pic register plus a constant. */
338 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
339 return 0;
340
341 /* - or it is an address that can't trap plus a constant integer,
342 with the proper remainder modulo the mode size if we are
343 considering unaligned memory references. */
344 if (GET_CODE (XEXP (x, 1)) == CONST_INT
345 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
346 size, mode, unaligned_mems))
347 return 0;
348
349 return 1;
350
351 case LO_SUM:
352 case PRE_MODIFY:
353 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
354 mode, unaligned_mems);
355
356 case PRE_DEC:
357 case PRE_INC:
358 case POST_DEC:
359 case POST_INC:
360 case POST_MODIFY:
361 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
362 mode, unaligned_mems);
363
364 default:
365 break;
366 }
367
368   /* If it isn't one of the cases above, it can cause a trap.  */
369 return 1;
370 }
371
372 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
373
374 int
375 rtx_addr_can_trap_p (const_rtx x)
376 {
377 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
378 }
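/* Usage sketch (illustrative only): a pass deciding whether a load may
   be hoisted speculatively might check

     if (MEM_P (x) && !rtx_addr_can_trap_p (XEXP (x, 0)))
       ... the access cannot fault because of its address ...

   may_trap_p_1 further below is the more general test for whole
   expressions.  */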
379
380 /* Return true if X is an address that is known to not be zero. */
381
382 bool
383 nonzero_address_p (const_rtx x)
384 {
385 const enum rtx_code code = GET_CODE (x);
386
387 switch (code)
388 {
389 case SYMBOL_REF:
390 return !SYMBOL_REF_WEAK (x);
391
392 case LABEL_REF:
393 return true;
394
395 case REG:
396 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
397 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
398 || x == stack_pointer_rtx
399 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
400 return true;
401 /* All of the virtual frame registers are stack references. */
402 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
403 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
404 return true;
405 return false;
406
407 case CONST:
408 return nonzero_address_p (XEXP (x, 0));
409
410 case PLUS:
411 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
412 return nonzero_address_p (XEXP (x, 0));
413 /* Handle PIC references. */
414 else if (XEXP (x, 0) == pic_offset_table_rtx
415 && CONSTANT_P (XEXP (x, 1)))
416 return true;
417 return false;
418
419 case PRE_MODIFY:
420 /* Similar to the above; allow positive offsets. Further, since
421 auto-inc is only allowed in memories, the register must be a
422 pointer. */
423 if (GET_CODE (XEXP (x, 1)) == CONST_INT
424 && INTVAL (XEXP (x, 1)) > 0)
425 return true;
426 return nonzero_address_p (XEXP (x, 0));
427
428 case PRE_INC:
429 /* Similarly. Further, the offset is always positive. */
430 return true;
431
432 case PRE_DEC:
433 case POST_DEC:
434 case POST_INC:
435 case POST_MODIFY:
436 return nonzero_address_p (XEXP (x, 0));
437
438 case LO_SUM:
439 return nonzero_address_p (XEXP (x, 1));
440
441 default:
442 break;
443 }
444
445   /* If it isn't one of the cases above, it might be zero.  */
446 return false;
447 }
448
449 /* Return 1 if X refers to a memory location whose address
450 cannot be compared reliably with constant addresses,
451 or if X refers to a BLKmode memory object.
452 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
453 zero, we are slightly more conservative. */
454
455 bool
456 rtx_addr_varies_p (const_rtx x, bool for_alias)
457 {
458 enum rtx_code code;
459 int i;
460 const char *fmt;
461
462 if (x == 0)
463 return 0;
464
465 code = GET_CODE (x);
466 if (code == MEM)
467 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
468
469 fmt = GET_RTX_FORMAT (code);
470 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
471 if (fmt[i] == 'e')
472 {
473 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
474 return 1;
475 }
476 else if (fmt[i] == 'E')
477 {
478 int j;
479 for (j = 0; j < XVECLEN (x, i); j++)
480 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
481 return 1;
482 }
483 return 0;
484 }
485 \f
486 /* Return the value of the integer term in X, if one is apparent;
487 otherwise return 0.
488 Only obvious integer terms are detected.
489 This is used in cse.c with the `related_value' field. */
490
491 HOST_WIDE_INT
492 get_integer_term (const_rtx x)
493 {
494 if (GET_CODE (x) == CONST)
495 x = XEXP (x, 0);
496
497 if (GET_CODE (x) == MINUS
498 && GET_CODE (XEXP (x, 1)) == CONST_INT)
499 return - INTVAL (XEXP (x, 1));
500 if (GET_CODE (x) == PLUS
501 && GET_CODE (XEXP (x, 1)) == CONST_INT)
502 return INTVAL (XEXP (x, 1));
503 return 0;
504 }
505
506 /* If X is a constant, return the value sans apparent integer term;
507 otherwise return 0.
508 Only obvious integer terms are detected. */
509
510 rtx
511 get_related_value (const_rtx x)
512 {
513 if (GET_CODE (x) != CONST)
514 return 0;
515 x = XEXP (x, 0);
516 if (GET_CODE (x) == PLUS
517 && GET_CODE (XEXP (x, 1)) == CONST_INT)
518 return XEXP (x, 0);
519 else if (GET_CODE (x) == MINUS
520 && GET_CODE (XEXP (x, 1)) == CONST_INT)
521 return XEXP (x, 0);
522 return 0;
523 }
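/* Example (illustrative, not part of the original file): for
   X = (const (plus (symbol_ref "a") (const_int 8))),
   get_integer_term returns 8 and get_related_value returns
   (symbol_ref "a").  For a bare (symbol_ref "a"), both return 0,
   since there is no enclosing CONST with an integer term.  */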
524 \f
525 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
526 to somewhere in the same object or object_block as SYMBOL. */
527
528 bool
529 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
530 {
531 tree decl;
532
533 if (GET_CODE (symbol) != SYMBOL_REF)
534 return false;
535
536 if (offset == 0)
537 return true;
538
539 if (offset > 0)
540 {
541 if (CONSTANT_POOL_ADDRESS_P (symbol)
542 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
543 return true;
544
545 decl = SYMBOL_REF_DECL (symbol);
546 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
547 return true;
548 }
549
550 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
551 && SYMBOL_REF_BLOCK (symbol)
552 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
553 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
554 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
555 return true;
556
557 return false;
558 }
559
560 /* Split X into a base and a constant offset, storing them in *BASE_OUT
561 and *OFFSET_OUT respectively. */
562
563 void
564 split_const (rtx x, rtx *base_out, rtx *offset_out)
565 {
566 if (GET_CODE (x) == CONST)
567 {
568 x = XEXP (x, 0);
569 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
570 {
571 *base_out = XEXP (x, 0);
572 *offset_out = XEXP (x, 1);
573 return;
574 }
575 }
576 *base_out = x;
577 *offset_out = const0_rtx;
578 }
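/* Example (illustrative): applied to
   (const (plus (symbol_ref "a") (const_int 4))), split_const stores
   (symbol_ref "a") in *BASE_OUT and (const_int 4) in *OFFSET_OUT;
   applied to any other rtx it stores the rtx itself and const0_rtx.  */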
579 \f
580 /* Return the number of places FIND appears within X. If COUNT_DEST is
581 zero, we do not count occurrences inside the destination of a SET. */
582
583 int
584 count_occurrences (const_rtx x, const_rtx find, int count_dest)
585 {
586 int i, j;
587 enum rtx_code code;
588 const char *format_ptr;
589 int count;
590
591 if (x == find)
592 return 1;
593
594 code = GET_CODE (x);
595
596 switch (code)
597 {
598 case REG:
599 case CONST_INT:
600 case CONST_DOUBLE:
601 case CONST_FIXED:
602 case CONST_VECTOR:
603 case SYMBOL_REF:
604 case CODE_LABEL:
605 case PC:
606 case CC0:
607 return 0;
608
609 case EXPR_LIST:
610 count = count_occurrences (XEXP (x, 0), find, count_dest);
611 if (XEXP (x, 1))
612 count += count_occurrences (XEXP (x, 1), find, count_dest);
613 return count;
614
615 case MEM:
616 if (MEM_P (find) && rtx_equal_p (x, find))
617 return 1;
618 break;
619
620 case SET:
621 if (SET_DEST (x) == find && ! count_dest)
622 return count_occurrences (SET_SRC (x), find, count_dest);
623 break;
624
625 default:
626 break;
627 }
628
629 format_ptr = GET_RTX_FORMAT (code);
630 count = 0;
631
632 for (i = 0; i < GET_RTX_LENGTH (code); i++)
633 {
634 switch (*format_ptr++)
635 {
636 case 'e':
637 count += count_occurrences (XEXP (x, i), find, count_dest);
638 break;
639
640 case 'E':
641 for (j = 0; j < XVECLEN (x, i); j++)
642 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
643 break;
644 }
645 }
646 return count;
647 }
648
649 \f
650 /* Nonzero if register REG appears somewhere within IN.
651 Also works if REG is not a register; in this case it checks
652 for a subexpression of IN that is Lisp "equal" to REG. */
653
654 int
655 reg_mentioned_p (const_rtx reg, const_rtx in)
656 {
657 const char *fmt;
658 int i;
659 enum rtx_code code;
660
661 if (in == 0)
662 return 0;
663
664 if (reg == in)
665 return 1;
666
667 if (GET_CODE (in) == LABEL_REF)
668 return reg == XEXP (in, 0);
669
670 code = GET_CODE (in);
671
672 switch (code)
673 {
674 /* Compare registers by number. */
675 case REG:
676 return REG_P (reg) && REGNO (in) == REGNO (reg);
677
678 /* These codes have no constituent expressions
679 and are unique. */
680 case SCRATCH:
681 case CC0:
682 case PC:
683 return 0;
684
685 case CONST_INT:
686 case CONST_VECTOR:
687 case CONST_DOUBLE:
688 case CONST_FIXED:
689 /* These are kept unique for a given value. */
690 return 0;
691
692 default:
693 break;
694 }
695
696 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
697 return 1;
698
699 fmt = GET_RTX_FORMAT (code);
700
701 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
702 {
703 if (fmt[i] == 'E')
704 {
705 int j;
706 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
707 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
708 return 1;
709 }
710 else if (fmt[i] == 'e'
711 && reg_mentioned_p (reg, XEXP (in, i)))
712 return 1;
713 }
714 return 0;
715 }
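/* Usage sketch (illustrative only):

     if (INSN_P (insn) && reg_mentioned_p (reg, PATTERN (insn)))
       ... INSN reads or writes (part of) REG ...

   Note that REGs are compared by register number alone, so two
   distinct but overlapping hard registers are not considered
   mentioned here; see reg_overlap_mentioned_p below for that.  */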
716 \f
717 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
718 no CODE_LABEL insn. */
719
720 int
721 no_labels_between_p (const_rtx beg, const_rtx end)
722 {
723 rtx p;
724 if (beg == end)
725 return 0;
726 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
727 if (LABEL_P (p))
728 return 0;
729 return 1;
730 }
731
732 /* Nonzero if register REG is used in an insn between
733 FROM_INSN and TO_INSN (exclusive of those two). */
734
735 int
736 reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
737 {
738 rtx insn;
739
740 if (from_insn == to_insn)
741 return 0;
742
743 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
744 if (INSN_P (insn)
745 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
746 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
747 return 1;
748 return 0;
749 }
750 \f
751 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
752 is entirely replaced by a new value and the only use is as a SET_DEST,
753 we do not consider it a reference. */
754
755 int
756 reg_referenced_p (const_rtx x, const_rtx body)
757 {
758 int i;
759
760 switch (GET_CODE (body))
761 {
762 case SET:
763 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
764 return 1;
765
766 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
767 of a REG that occupies all of the REG, the insn references X if
768 it is mentioned in the destination. */
769 if (GET_CODE (SET_DEST (body)) != CC0
770 && GET_CODE (SET_DEST (body)) != PC
771 && !REG_P (SET_DEST (body))
772 && ! (GET_CODE (SET_DEST (body)) == SUBREG
773 && REG_P (SUBREG_REG (SET_DEST (body)))
774 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
775 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
776 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
777 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
778 && reg_overlap_mentioned_p (x, SET_DEST (body)))
779 return 1;
780 return 0;
781
782 case ASM_OPERANDS:
783 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
784 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
785 return 1;
786 return 0;
787
788 case CALL:
789 case USE:
790 case IF_THEN_ELSE:
791 return reg_overlap_mentioned_p (x, body);
792
793 case TRAP_IF:
794 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
795
796 case PREFETCH:
797 return reg_overlap_mentioned_p (x, XEXP (body, 0));
798
799 case UNSPEC:
800 case UNSPEC_VOLATILE:
801 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
802 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
803 return 1;
804 return 0;
805
806 case PARALLEL:
807 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
808 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
809 return 1;
810 return 0;
811
812 case CLOBBER:
813 if (MEM_P (XEXP (body, 0)))
814 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
815 return 1;
816 return 0;
817
818 case COND_EXEC:
819 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
820 return 1;
821 return reg_referenced_p (x, COND_EXEC_CODE (body));
822
823 default:
824 return 0;
825 }
826 }
827 \f
828 /* Nonzero if register REG is set or clobbered in an insn between
829 FROM_INSN and TO_INSN (exclusive of those two). */
830
831 int
832 reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
833 {
834 const_rtx insn;
835
836 if (from_insn == to_insn)
837 return 0;
838
839 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
840 if (INSN_P (insn) && reg_set_p (reg, insn))
841 return 1;
842 return 0;
843 }
844
845 /* Nonzero if REG is set or clobbered in INSN; internals of reg_set_between_p.  */
846 int
847 reg_set_p (const_rtx reg, const_rtx insn)
848 {
849 /* We can be passed an insn or part of one. If we are passed an insn,
850 check if a side-effect of the insn clobbers REG. */
851 if (INSN_P (insn)
852 && (FIND_REG_INC_NOTE (insn, reg)
853 || (CALL_P (insn)
854 && ((REG_P (reg)
855 && REGNO (reg) < FIRST_PSEUDO_REGISTER
856 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
857 GET_MODE (reg), REGNO (reg)))
858 || MEM_P (reg)
859 || find_reg_fusage (insn, CLOBBER, reg)))))
860 return 1;
861
862 return set_of (reg, insn) != NULL_RTX;
863 }
864
865 /* Similar to reg_set_between_p, but check all registers in X. Return 0
866 only if none of them are modified between START and END. Return 1 if
867 X contains a MEM; this routine does use memory aliasing. */
868
869 int
870 modified_between_p (const_rtx x, const_rtx start, const_rtx end)
871 {
872 const enum rtx_code code = GET_CODE (x);
873 const char *fmt;
874 int i, j;
875 rtx insn;
876
877 if (start == end)
878 return 0;
879
880 switch (code)
881 {
882 case CONST_INT:
883 case CONST_DOUBLE:
884 case CONST_FIXED:
885 case CONST_VECTOR:
886 case CONST:
887 case SYMBOL_REF:
888 case LABEL_REF:
889 return 0;
890
891 case PC:
892 case CC0:
893 return 1;
894
895 case MEM:
896 if (modified_between_p (XEXP (x, 0), start, end))
897 return 1;
898 if (MEM_READONLY_P (x))
899 return 0;
900 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
901 if (memory_modified_in_insn_p (x, insn))
902 return 1;
903 return 0;
904 break;
905
906 case REG:
907 return reg_set_between_p (x, start, end);
908
909 default:
910 break;
911 }
912
913 fmt = GET_RTX_FORMAT (code);
914 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
915 {
916 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
917 return 1;
918
919 else if (fmt[i] == 'E')
920 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
921 if (modified_between_p (XVECEXP (x, i, j), start, end))
922 return 1;
923 }
924
925 return 0;
926 }
927
928 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
929 of them are modified in INSN. Return 1 if X contains a MEM; this routine
930 does use memory aliasing. */
931
932 int
933 modified_in_p (const_rtx x, const_rtx insn)
934 {
935 const enum rtx_code code = GET_CODE (x);
936 const char *fmt;
937 int i, j;
938
939 switch (code)
940 {
941 case CONST_INT:
942 case CONST_DOUBLE:
943 case CONST_FIXED:
944 case CONST_VECTOR:
945 case CONST:
946 case SYMBOL_REF:
947 case LABEL_REF:
948 return 0;
949
950 case PC:
951 case CC0:
952 return 1;
953
954 case MEM:
955 if (modified_in_p (XEXP (x, 0), insn))
956 return 1;
957 if (MEM_READONLY_P (x))
958 return 0;
959 if (memory_modified_in_insn_p (x, insn))
960 return 1;
961 return 0;
962 break;
963
964 case REG:
965 return reg_set_p (x, insn);
966
967 default:
968 break;
969 }
970
971 fmt = GET_RTX_FORMAT (code);
972 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
973 {
974 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
975 return 1;
976
977 else if (fmt[i] == 'E')
978 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
979 if (modified_in_p (XVECEXP (x, i, j), insn))
980 return 1;
981 }
982
983 return 0;
984 }
985 \f
986 /* Helper function for set_of. */
987 struct set_of_data
988 {
989 const_rtx found;
990 const_rtx pat;
991 };
992
993 static void
994 set_of_1 (rtx x, const_rtx pat, void *data1)
995 {
996 struct set_of_data *const data = (struct set_of_data *) (data1);
997 if (rtx_equal_p (x, data->pat)
998 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
999 data->found = pat;
1000 }
1001
1002 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1003 (either directly or via STRICT_LOW_PART and similar modifiers). */
1004 const_rtx
1005 set_of (const_rtx pat, const_rtx insn)
1006 {
1007 struct set_of_data data;
1008 data.found = NULL_RTX;
1009 data.pat = pat;
1010 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1011 return data.found;
1012 }
1013 \f
1014 /* Given an INSN, return a SET expression if this insn has only a single SET.
1015    It may also have CLOBBERs, USEs, or SETs whose output
1016 will not be used, which we ignore. */
1017
1018 rtx
1019 single_set_2 (const_rtx insn, const_rtx pat)
1020 {
1021 rtx set = NULL;
1022 int set_verified = 1;
1023 int i;
1024
1025 if (GET_CODE (pat) == PARALLEL)
1026 {
1027 for (i = 0; i < XVECLEN (pat, 0); i++)
1028 {
1029 rtx sub = XVECEXP (pat, 0, i);
1030 switch (GET_CODE (sub))
1031 {
1032 case USE:
1033 case CLOBBER:
1034 break;
1035
1036 case SET:
1037	    /* We can consider insns having multiple sets, where all
1038	       but one are dead, as single set insns.  In the common case
1039	       only a single set is present in the pattern, so we want
1040	       to avoid checking for REG_UNUSED notes unless necessary.
1041
1042	       When we reach a set the first time, we just expect it to be
1043	       the single set we are looking for; only when more sets
1044	       are found in the insn do we check them.  */
1045 if (!set_verified)
1046 {
1047 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1048 && !side_effects_p (set))
1049 set = NULL;
1050 else
1051 set_verified = 1;
1052 }
1053 if (!set)
1054 set = sub, set_verified = 0;
1055 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1056 || side_effects_p (sub))
1057 return NULL_RTX;
1058 break;
1059
1060 default:
1061 return NULL_RTX;
1062 }
1063 }
1064 }
1065 return set;
1066 }
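/* Typical use (illustrative only): callers normally go through the
   single_set wrapper, which handles a plain SET directly and falls
   back on this function for PARALLELs:

     rtx set = single_set (insn);
     if (set)
       ... use SET_SRC (set) and SET_DEST (set) ...  */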
1067
1068 /* Given an INSN, return nonzero if it has more than one SET, else return
1069 zero. */
1070
1071 int
1072 multiple_sets (const_rtx insn)
1073 {
1074 int found;
1075 int i;
1076
1077 /* INSN must be an insn. */
1078 if (! INSN_P (insn))
1079 return 0;
1080
1081 /* Only a PARALLEL can have multiple SETs. */
1082 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1083 {
1084 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1085 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1086 {
1087 /* If we have already found a SET, then return now. */
1088 if (found)
1089 return 1;
1090 else
1091 found = 1;
1092 }
1093 }
1094
1095 /* Either zero or one SET. */
1096 return 0;
1097 }
1098 \f
1099 /* Return nonzero if the destination of SET equals the source
1100 and there are no side effects. */
1101
1102 int
1103 set_noop_p (const_rtx set)
1104 {
1105 rtx src = SET_SRC (set);
1106 rtx dst = SET_DEST (set);
1107
1108 if (dst == pc_rtx && src == pc_rtx)
1109 return 1;
1110
1111 if (MEM_P (dst) && MEM_P (src))
1112 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1113
1114 if (GET_CODE (dst) == ZERO_EXTRACT)
1115 return rtx_equal_p (XEXP (dst, 0), src)
1116 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1117 && !side_effects_p (src);
1118
1119 if (GET_CODE (dst) == STRICT_LOW_PART)
1120 dst = XEXP (dst, 0);
1121
1122 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1123 {
1124 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1125 return 0;
1126 src = SUBREG_REG (src);
1127 dst = SUBREG_REG (dst);
1128 }
1129
1130 return (REG_P (src) && REG_P (dst)
1131 && REGNO (src) == REGNO (dst));
1132 }
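/* Examples (illustrative): (set (reg:SI 100) (reg:SI 100)) is a no-op,
   as is a MEM-to-MEM set whose operands are rtx_equal_p and free of
   side effects; (set (reg:SI 100) (reg:SI 101)) is not.  */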
1133 \f
1134 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1135 value to itself. */
1136
1137 int
1138 noop_move_p (const_rtx insn)
1139 {
1140 rtx pat = PATTERN (insn);
1141
1142 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1143 return 1;
1144
1145 /* Insns carrying these notes are useful later on. */
1146 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1147 return 0;
1148
1149 if (GET_CODE (pat) == SET && set_noop_p (pat))
1150 return 1;
1151
1152 if (GET_CODE (pat) == PARALLEL)
1153 {
1154 int i;
1155 /* If nothing but SETs of registers to themselves,
1156 this insn can also be deleted. */
1157 for (i = 0; i < XVECLEN (pat, 0); i++)
1158 {
1159 rtx tem = XVECEXP (pat, 0, i);
1160
1161 if (GET_CODE (tem) == USE
1162 || GET_CODE (tem) == CLOBBER)
1163 continue;
1164
1165 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1166 return 0;
1167 }
1168
1169 return 1;
1170 }
1171 return 0;
1172 }
1173 \f
1174
1175 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1176 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1177 If the object was modified, if we hit a partial assignment to X, or hit a
1178 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1179 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1180 be the src. */
1181
1182 rtx
1183 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1184 {
1185 rtx p;
1186
1187 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1188 p = PREV_INSN (p))
1189 if (INSN_P (p))
1190 {
1191 rtx set = single_set (p);
1192 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1193
1194 if (set && rtx_equal_p (x, SET_DEST (set)))
1195 {
1196 rtx src = SET_SRC (set);
1197
1198 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1199 src = XEXP (note, 0);
1200
1201 if ((valid_to == NULL_RTX
1202 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1203 /* Reject hard registers because we don't usually want
1204 to use them; we'd rather use a pseudo. */
1205 && (! (REG_P (src)
1206 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1207 {
1208 *pinsn = p;
1209 return src;
1210 }
1211 }
1212
1213	  /* If set in a non-simple way, we don't have a value.  */
1214 if (reg_set_p (x, p))
1215 break;
1216 }
1217
1218 return x;
1219 }
1220 \f
1221 /* Return nonzero if register in range [REGNO, ENDREGNO)
1222 appears either explicitly or implicitly in X
1223 other than being stored into.
1224
1225 References contained within the substructure at LOC do not count.
1226 LOC may be zero, meaning don't ignore anything. */
1227
1228 int
1229 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1230 rtx *loc)
1231 {
1232 int i;
1233 unsigned int x_regno;
1234 RTX_CODE code;
1235 const char *fmt;
1236
1237 repeat:
1238   /* The contents of a REG_NONNEG note are always zero, so we must come here
1239 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1240 if (x == 0)
1241 return 0;
1242
1243 code = GET_CODE (x);
1244
1245 switch (code)
1246 {
1247 case REG:
1248 x_regno = REGNO (x);
1249
1250       /* If we are modifying the stack, frame, or argument pointer, it will
1251 clobber a virtual register. In fact, we could be more precise,
1252 but it isn't worth it. */
1253 if ((x_regno == STACK_POINTER_REGNUM
1254 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1255 || x_regno == ARG_POINTER_REGNUM
1256 #endif
1257 || x_regno == FRAME_POINTER_REGNUM)
1258 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1259 return 1;
1260
1261 return endregno > x_regno && regno < END_REGNO (x);
1262
1263 case SUBREG:
1264 /* If this is a SUBREG of a hard reg, we can see exactly which
1265 registers are being modified. Otherwise, handle normally. */
1266 if (REG_P (SUBREG_REG (x))
1267 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1268 {
1269 unsigned int inner_regno = subreg_regno (x);
1270 unsigned int inner_endregno
1271 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1272 ? subreg_nregs (x) : 1);
1273
1274 return endregno > inner_regno && regno < inner_endregno;
1275 }
1276 break;
1277
1278 case CLOBBER:
1279 case SET:
1280 if (&SET_DEST (x) != loc
1281 /* Note setting a SUBREG counts as referring to the REG it is in for
1282 a pseudo but not for hard registers since we can
1283 treat each word individually. */
1284 && ((GET_CODE (SET_DEST (x)) == SUBREG
1285 && loc != &SUBREG_REG (SET_DEST (x))
1286 && REG_P (SUBREG_REG (SET_DEST (x)))
1287 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1288 && refers_to_regno_p (regno, endregno,
1289 SUBREG_REG (SET_DEST (x)), loc))
1290 || (!REG_P (SET_DEST (x))
1291 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1292 return 1;
1293
1294 if (code == CLOBBER || loc == &SET_SRC (x))
1295 return 0;
1296 x = SET_SRC (x);
1297 goto repeat;
1298
1299 default:
1300 break;
1301 }
1302
1303 /* X does not match, so try its subexpressions. */
1304
1305 fmt = GET_RTX_FORMAT (code);
1306 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1307 {
1308 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1309 {
1310 if (i == 0)
1311 {
1312 x = XEXP (x, 0);
1313 goto repeat;
1314 }
1315 else
1316 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1317 return 1;
1318 }
1319 else if (fmt[i] == 'E')
1320 {
1321 int j;
1322 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1323 if (loc != &XVECEXP (x, i, j)
1324 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1325 return 1;
1326 }
1327 }
1328 return 0;
1329 }
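/* Example (illustrative): refers_to_regno_p (2, 4, x, NULL) asks
   whether hard register 2 or 3 is referenced in X; the range is
   half-open, [REGNO, ENDREGNO).  */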
1330
1331 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1332 we check if any register number in X conflicts with the relevant register
1333 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1334 contains a MEM (we don't bother checking for memory addresses that can't
1335    conflict because we expect this to be a rare case).  */
1336
1337 int
1338 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1339 {
1340 unsigned int regno, endregno;
1341
1342   /* If either argument is a constant, then modifying X cannot
1343      affect IN.  Here we look only at IN; we can profitably combine
1344 CONSTANT_P (x) with the switch statement below. */
1345 if (CONSTANT_P (in))
1346 return 0;
1347
1348 recurse:
1349 switch (GET_CODE (x))
1350 {
1351 case STRICT_LOW_PART:
1352 case ZERO_EXTRACT:
1353 case SIGN_EXTRACT:
1354 /* Overly conservative. */
1355 x = XEXP (x, 0);
1356 goto recurse;
1357
1358 case SUBREG:
1359 regno = REGNO (SUBREG_REG (x));
1360 if (regno < FIRST_PSEUDO_REGISTER)
1361 regno = subreg_regno (x);
1362 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1363 ? subreg_nregs (x) : 1);
1364 goto do_reg;
1365
1366 case REG:
1367 regno = REGNO (x);
1368 endregno = END_REGNO (x);
1369 do_reg:
1370 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1371
1372 case MEM:
1373 {
1374 const char *fmt;
1375 int i;
1376
1377 if (MEM_P (in))
1378 return 1;
1379
1380 fmt = GET_RTX_FORMAT (GET_CODE (in));
1381 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1382 if (fmt[i] == 'e')
1383 {
1384 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1385 return 1;
1386 }
1387 else if (fmt[i] == 'E')
1388 {
1389 int j;
1390 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1391 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1392 return 1;
1393 }
1394
1395 return 0;
1396 }
1397
1398 case SCRATCH:
1399 case PC:
1400 case CC0:
1401 return reg_mentioned_p (x, in);
1402
1403 case PARALLEL:
1404 {
1405 int i;
1406
1407 /* If any register in here refers to it we return true. */
1408 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1409 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1410 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1411 return 1;
1412 return 0;
1413 }
1414
1415 default:
1416 gcc_assert (CONSTANT_P (x));
1417 return 0;
1418 }
1419 }
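/* Example (illustrative and target-dependent): on a machine where
   DImode occupies two word registers, (reg:DI 0) overlaps (reg:SI 1),
   so reg_overlap_mentioned_p reports a conflict even though the
   REGNOs differ, whereas reg_mentioned_p above would not.  */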
1420 \f
1421 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1422 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1423 ignored by note_stores, but passed to FUN.
1424
1425 FUN receives three arguments:
1426 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1427 2. the SET or CLOBBER rtx that does the store,
1428 3. the pointer DATA provided to note_stores.
1429
1430 If the item being stored in or clobbered is a SUBREG of a hard register,
1431 the SUBREG will be passed. */
1432
1433 void
1434 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1435 {
1436 int i;
1437
1438 if (GET_CODE (x) == COND_EXEC)
1439 x = COND_EXEC_CODE (x);
1440
1441 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1442 {
1443 rtx dest = SET_DEST (x);
1444
1445 while ((GET_CODE (dest) == SUBREG
1446 && (!REG_P (SUBREG_REG (dest))
1447 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1448 || GET_CODE (dest) == ZERO_EXTRACT
1449 || GET_CODE (dest) == STRICT_LOW_PART)
1450 dest = XEXP (dest, 0);
1451
1452 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1453 each of whose first operand is a register. */
1454 if (GET_CODE (dest) == PARALLEL)
1455 {
1456 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1457 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1458 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1459 }
1460 else
1461 (*fun) (dest, x, data);
1462 }
1463
1464 else if (GET_CODE (x) == PARALLEL)
1465 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1466 note_stores (XVECEXP (x, 0, i), fun, data);
1467 }
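/* Usage sketch (illustrative only; record_store and the regset passed
   through DATA are hypothetical):

     static void
     record_store (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
                   void *data)
     {
       if (REG_P (dest))
         SET_REGNO_REG_SET ((regset) data, REGNO (dest));
     }

     ...
     note_stores (PATTERN (insn), record_store, stored_regs);  */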
1468 \f
1469 /* Like note_stores, but call FUN for each expression that is being
1470 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1471 FUN for each expression, not any interior subexpressions. FUN receives a
1472 pointer to the expression and the DATA passed to this function.
1473
1474 Note that this is not quite the same test as that done in reg_referenced_p
1475 since that considers something as being referenced if it is being
1476 partially set, while we do not. */
1477
1478 void
1479 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1480 {
1481 rtx body = *pbody;
1482 int i;
1483
1484 switch (GET_CODE (body))
1485 {
1486 case COND_EXEC:
1487 (*fun) (&COND_EXEC_TEST (body), data);
1488 note_uses (&COND_EXEC_CODE (body), fun, data);
1489 return;
1490
1491 case PARALLEL:
1492 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1493 note_uses (&XVECEXP (body, 0, i), fun, data);
1494 return;
1495
1496 case SEQUENCE:
1497 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1498 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1499 return;
1500
1501 case USE:
1502 (*fun) (&XEXP (body, 0), data);
1503 return;
1504
1505 case ASM_OPERANDS:
1506 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1507 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1508 return;
1509
1510 case TRAP_IF:
1511 (*fun) (&TRAP_CONDITION (body), data);
1512 return;
1513
1514 case PREFETCH:
1515 (*fun) (&XEXP (body, 0), data);
1516 return;
1517
1518 case UNSPEC:
1519 case UNSPEC_VOLATILE:
1520 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1521 (*fun) (&XVECEXP (body, 0, i), data);
1522 return;
1523
1524 case CLOBBER:
1525 if (MEM_P (XEXP (body, 0)))
1526 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1527 return;
1528
1529 case SET:
1530 {
1531 rtx dest = SET_DEST (body);
1532
1533	/* For sets we replace everything in the source, plus registers in the
1534	   memory expression of the store and the operands of a ZERO_EXTRACT.  */
1535 (*fun) (&SET_SRC (body), data);
1536
1537 if (GET_CODE (dest) == ZERO_EXTRACT)
1538 {
1539 (*fun) (&XEXP (dest, 1), data);
1540 (*fun) (&XEXP (dest, 2), data);
1541 }
1542
1543 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1544 dest = XEXP (dest, 0);
1545
1546 if (MEM_P (dest))
1547 (*fun) (&XEXP (dest, 0), data);
1548 }
1549 return;
1550
1551 default:
1552 /* All the other possibilities never store. */
1553 (*fun) (pbody, data);
1554 return;
1555 }
1556 }
1557 \f
1558 /* Return nonzero if X's old contents don't survive after INSN.
1559 This will be true if X is (cc0) or if X is a register and
1560 X dies in INSN or because INSN entirely sets X.
1561
1562    "Entirely set" means set directly and not through a SUBREG or
1563 ZERO_EXTRACT, so no trace of the old contents remains.
1564 Likewise, REG_INC does not count.
1565
1566 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1567 but for this use that makes no difference, since regs don't overlap
1568 during their lifetimes. Therefore, this function may be used
1569 at any time after deaths have been computed.
1570
1571 If REG is a hard reg that occupies multiple machine registers, this
1572 function will only return 1 if each of those registers will be replaced
1573 by INSN. */
1574
1575 int
1576 dead_or_set_p (const_rtx insn, const_rtx x)
1577 {
1578 unsigned int regno, end_regno;
1579 unsigned int i;
1580
1581 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1582 if (GET_CODE (x) == CC0)
1583 return 1;
1584
1585 gcc_assert (REG_P (x));
1586
1587 regno = REGNO (x);
1588 end_regno = END_REGNO (x);
1589 for (i = regno; i < end_regno; i++)
1590 if (! dead_or_set_regno_p (insn, i))
1591 return 0;
1592
1593 return 1;
1594 }
1595
1596 /* Return TRUE iff DEST is a register or subreg of a register and
1597 doesn't change the number of words of the inner register, and any
1598 part of the register is TEST_REGNO. */
1599
1600 static bool
1601 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1602 {
1603 unsigned int regno, endregno;
1604
1605 if (GET_CODE (dest) == SUBREG
1606 && (((GET_MODE_SIZE (GET_MODE (dest))
1607 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1608 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1609 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1610 dest = SUBREG_REG (dest);
1611
1612 if (!REG_P (dest))
1613 return false;
1614
1615 regno = REGNO (dest);
1616 endregno = END_REGNO (dest);
1617 return (test_regno >= regno && test_regno < endregno);
1618 }
1619
1620 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1621 any member matches the covers_regno_no_parallel_p criteria. */
1622
1623 static bool
1624 covers_regno_p (const_rtx dest, unsigned int test_regno)
1625 {
1626 if (GET_CODE (dest) == PARALLEL)
1627 {
1628 /* Some targets place small structures in registers for return
1629 values of functions, and those registers are wrapped in
1630 PARALLELs that we may see as the destination of a SET. */
1631 int i;
1632
1633 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1634 {
1635 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1636 if (inner != NULL_RTX
1637 && covers_regno_no_parallel_p (inner, test_regno))
1638 return true;
1639 }
1640
1641 return false;
1642 }
1643 else
1644 return covers_regno_no_parallel_p (dest, test_regno);
1645 }
1646
1647 /* Utility function for dead_or_set_p to check an individual register. */
1648
1649 int
1650 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1651 {
1652 const_rtx pattern;
1653
1654 /* See if there is a death note for something that includes TEST_REGNO. */
1655 if (find_regno_note (insn, REG_DEAD, test_regno))
1656 return 1;
1657
1658 if (CALL_P (insn)
1659 && find_regno_fusage (insn, CLOBBER, test_regno))
1660 return 1;
1661
1662 pattern = PATTERN (insn);
1663
1664 if (GET_CODE (pattern) == COND_EXEC)
1665 pattern = COND_EXEC_CODE (pattern);
1666
1667 if (GET_CODE (pattern) == SET)
1668 return covers_regno_p (SET_DEST (pattern), test_regno);
1669 else if (GET_CODE (pattern) == PARALLEL)
1670 {
1671 int i;
1672
1673 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1674 {
1675 rtx body = XVECEXP (pattern, 0, i);
1676
1677 if (GET_CODE (body) == COND_EXEC)
1678 body = COND_EXEC_CODE (body);
1679
1680 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1681 && covers_regno_p (SET_DEST (body), test_regno))
1682 return 1;
1683 }
1684 }
1685
1686 return 0;
1687 }
1688
1689 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1690 If DATUM is nonzero, look for one whose datum is DATUM. */
1691
1692 rtx
1693 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1694 {
1695 rtx link;
1696
1697 gcc_assert (insn);
1698
1699 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1700 if (! INSN_P (insn))
1701 return 0;
1702 if (datum == 0)
1703 {
1704 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1705 if (REG_NOTE_KIND (link) == kind)
1706 return link;
1707 return 0;
1708 }
1709
1710 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1711 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1712 return link;
1713 return 0;
1714 }
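/* Example (illustrative): find_reg_note (insn, REG_EQUAL, NULL_RTX)
   returns INSN's REG_EQUAL note, if any, regardless of its datum;
   with a nonzero DATUM the note's datum must in addition be
   pointer-equal to DATUM.  */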
1715
1716 /* Return the reg-note of kind KIND in insn INSN which applies to register
1717 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1718 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1719 it might be the case that the note overlaps REGNO. */
1720
1721 rtx
1722 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1723 {
1724 rtx link;
1725
1726 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1727 if (! INSN_P (insn))
1728 return 0;
1729
1730 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1731 if (REG_NOTE_KIND (link) == kind
1732 /* Verify that it is a register, so that scratch and MEM won't cause a
1733 problem here. */
1734 && REG_P (XEXP (link, 0))
1735 && REGNO (XEXP (link, 0)) <= regno
1736 && END_REGNO (XEXP (link, 0)) > regno)
1737 return link;
1738 return 0;
1739 }
1740
1741 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1742 has such a note. */
1743
1744 rtx
1745 find_reg_equal_equiv_note (const_rtx insn)
1746 {
1747 rtx link;
1748
1749 if (!INSN_P (insn))
1750 return 0;
1751
1752 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1753 if (REG_NOTE_KIND (link) == REG_EQUAL
1754 || REG_NOTE_KIND (link) == REG_EQUIV)
1755 {
1756 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1757 insns that have multiple sets. Checking single_set to
1758 make sure of this is not the proper check, as explained
1759 in the comment in set_unique_reg_note.
1760
1761 This should be changed into an assert. */
1762 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1763 return 0;
1764 return link;
1765 }
1766 return NULL;
1767 }
1768
1769 /* Check whether INSN is a single_set whose source is known to be
1770 equivalent to a constant. Return that constant if so, otherwise
1771 return null. */
1772
1773 rtx
1774 find_constant_src (const_rtx insn)
1775 {
1776 rtx note, set, x;
1777
1778 set = single_set (insn);
1779 if (set)
1780 {
1781 x = avoid_constant_pool_reference (SET_SRC (set));
1782 if (CONSTANT_P (x))
1783 return x;
1784 }
1785
1786 note = find_reg_equal_equiv_note (insn);
1787 if (note && CONSTANT_P (XEXP (note, 0)))
1788 return XEXP (note, 0);
1789
1790 return NULL_RTX;
1791 }
1792
1793 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1794 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1795
1796 int
1797 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1798 {
1799 /* If it's not a CALL_INSN, it can't possibly have a
1800 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1801 if (!CALL_P (insn))
1802 return 0;
1803
1804 gcc_assert (datum);
1805
1806 if (!REG_P (datum))
1807 {
1808 rtx link;
1809
1810 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1811 link;
1812 link = XEXP (link, 1))
1813 if (GET_CODE (XEXP (link, 0)) == code
1814 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1815 return 1;
1816 }
1817 else
1818 {
1819 unsigned int regno = REGNO (datum);
1820
1821 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1822 to pseudo registers, so don't bother checking. */
1823
1824 if (regno < FIRST_PSEUDO_REGISTER)
1825 {
1826 unsigned int end_regno = END_HARD_REGNO (datum);
1827 unsigned int i;
1828
1829 for (i = regno; i < end_regno; i++)
1830 if (find_regno_fusage (insn, code, i))
1831 return 1;
1832 }
1833 }
1834
1835 return 0;
1836 }
1837
1838 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1839 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1840
1841 int
1842 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
1843 {
1844 rtx link;
1845
1846 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1847 to pseudo registers, so don't bother checking. */
1848
1849 if (regno >= FIRST_PSEUDO_REGISTER
1850       || !CALL_P (insn))
1851 return 0;
1852
1853 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1854 {
1855 rtx op, reg;
1856
1857 if (GET_CODE (op = XEXP (link, 0)) == code
1858 && REG_P (reg = XEXP (op, 0))
1859 && REGNO (reg) <= regno
1860 && END_HARD_REGNO (reg) > regno)
1861 return 1;
1862 }
1863
1864 return 0;
1865 }
1866
1867 \f
1868 /* Add register note with kind KIND and datum DATUM to INSN. */
1869
1870 void
1871 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
1872 {
1873 rtx note;
1874
1875 switch (kind)
1876 {
1877 case REG_CC_SETTER:
1878 case REG_CC_USER:
1879 case REG_LABEL_TARGET:
1880 case REG_LABEL_OPERAND:
1881 /* These types of register notes use an INSN_LIST rather than an
1882 EXPR_LIST, so that copying is done right and dumps look
1883 better. */
1884 note = alloc_INSN_LIST (datum, REG_NOTES (insn));
1885 PUT_REG_NOTE_KIND (note, kind);
1886 break;
1887
1888 default:
1889 note = alloc_EXPR_LIST (kind, datum, REG_NOTES (insn));
1890 break;
1891 }
1892
1893 REG_NOTES (insn) = note;
1894 }
1895
1896 /* Remove register note NOTE from the REG_NOTES of INSN. */
1897
1898 void
1899 remove_note (rtx insn, const_rtx note)
1900 {
1901 rtx link;
1902
1903 if (note == NULL_RTX)
1904 return;
1905
1906 if (REG_NOTES (insn) == note)
1907 REG_NOTES (insn) = XEXP (note, 1);
1908 else
1909 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1910 if (XEXP (link, 1) == note)
1911 {
1912 XEXP (link, 1) = XEXP (note, 1);
1913 break;
1914 }
1915
1916 switch (REG_NOTE_KIND (note))
1917 {
1918 case REG_EQUAL:
1919 case REG_EQUIV:
1920 df_notes_rescan (insn);
1921 break;
1922 default:
1923 break;
1924 }
1925 }
1926
1927 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
1928
1929 void
1930 remove_reg_equal_equiv_notes (rtx insn)
1931 {
1932 rtx *loc;
1933
1934 loc = &REG_NOTES (insn);
1935 while (*loc)
1936 {
1937 enum reg_note kind = REG_NOTE_KIND (*loc);
1938 if (kind == REG_EQUAL || kind == REG_EQUIV)
1939 *loc = XEXP (*loc, 1);
1940 else
1941 loc = &XEXP (*loc, 1);
1942 }
1943 }
1944
1945 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1946 return 1 if it is found. A simple equality test is used to determine if
1947 NODE matches. */
1948
1949 int
1950 in_expr_list_p (const_rtx listp, const_rtx node)
1951 {
1952 const_rtx x;
1953
1954 for (x = listp; x; x = XEXP (x, 1))
1955 if (node == XEXP (x, 0))
1956 return 1;
1957
1958 return 0;
1959 }
1960
1961 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1962 remove that entry from the list if it is found.
1963
1964 A simple equality test is used to determine if NODE matches. */
1965
1966 void
1967 remove_node_from_expr_list (const_rtx node, rtx *listp)
1968 {
1969 rtx temp = *listp;
1970 rtx prev = NULL_RTX;
1971
1972 while (temp)
1973 {
1974 if (node == XEXP (temp, 0))
1975 {
1976 /* Splice the node out of the list. */
1977 if (prev)
1978 XEXP (prev, 1) = XEXP (temp, 1);
1979 else
1980 *listp = XEXP (temp, 1);
1981
1982 return;
1983 }
1984
1985 prev = temp;
1986 temp = XEXP (temp, 1);
1987 }
1988 }
1989 \f
1990 /* Nonzero if X contains any volatile instructions. These are instructions
1991    which may cause unpredictable machine state, and thus no
1992    instructions should be moved or combined across them.  This includes
1993 only volatile asms and UNSPEC_VOLATILE instructions. */
1994
1995 int
1996 volatile_insn_p (const_rtx x)
1997 {
1998 const RTX_CODE code = GET_CODE (x);
1999 switch (code)
2000 {
2001 case LABEL_REF:
2002 case SYMBOL_REF:
2003 case CONST_INT:
2004 case CONST:
2005 case CONST_DOUBLE:
2006 case CONST_FIXED:
2007 case CONST_VECTOR:
2008 case CC0:
2009 case PC:
2010 case REG:
2011 case SCRATCH:
2012 case CLOBBER:
2013 case ADDR_VEC:
2014 case ADDR_DIFF_VEC:
2015 case CALL:
2016 case MEM:
2017 return 0;
2018
2019 case UNSPEC_VOLATILE:
2020 /* case TRAP_IF: This isn't clear yet. */
2021 return 1;
2022
2023 case ASM_INPUT:
2024 case ASM_OPERANDS:
2025 if (MEM_VOLATILE_P (x))
2026 return 1;
2027
2028 default:
2029 break;
2030 }
2031
2032 /* Recursively scan the operands of this expression. */
2033
2034 {
2035 const char *const fmt = GET_RTX_FORMAT (code);
2036 int i;
2037
2038 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2039 {
2040 if (fmt[i] == 'e')
2041 {
2042 if (volatile_insn_p (XEXP (x, i)))
2043 return 1;
2044 }
2045 else if (fmt[i] == 'E')
2046 {
2047 int j;
2048 for (j = 0; j < XVECLEN (x, i); j++)
2049 if (volatile_insn_p (XVECEXP (x, i, j)))
2050 return 1;
2051 }
2052 }
2053 }
2054 return 0;
2055 }
2056
2057 /* Nonzero if X contains any volatile memory references
2058 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2059
2060 int
2061 volatile_refs_p (const_rtx x)
2062 {
2063 const RTX_CODE code = GET_CODE (x);
2064 switch (code)
2065 {
2066 case LABEL_REF:
2067 case SYMBOL_REF:
2068 case CONST_INT:
2069 case CONST:
2070 case CONST_DOUBLE:
2071 case CONST_FIXED:
2072 case CONST_VECTOR:
2073 case CC0:
2074 case PC:
2075 case REG:
2076 case SCRATCH:
2077 case CLOBBER:
2078 case ADDR_VEC:
2079 case ADDR_DIFF_VEC:
2080 return 0;
2081
2082 case UNSPEC_VOLATILE:
2083 return 1;
2084
2085 case MEM:
2086 case ASM_INPUT:
2087 case ASM_OPERANDS:
2088 if (MEM_VOLATILE_P (x))
2089 return 1;
2090
2091 default:
2092 break;
2093 }
2094
2095 /* Recursively scan the operands of this expression. */
2096
2097 {
2098 const char *const fmt = GET_RTX_FORMAT (code);
2099 int i;
2100
2101 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2102 {
2103 if (fmt[i] == 'e')
2104 {
2105 if (volatile_refs_p (XEXP (x, i)))
2106 return 1;
2107 }
2108 else if (fmt[i] == 'E')
2109 {
2110 int j;
2111 for (j = 0; j < XVECLEN (x, i); j++)
2112 if (volatile_refs_p (XVECEXP (x, i, j)))
2113 return 1;
2114 }
2115 }
2116 }
2117 return 0;
2118 }
2119
2120 /* Similar to above, except that it also rejects register pre- and post-
2121 incrementing. */
2122
2123 int
2124 side_effects_p (const_rtx x)
2125 {
2126 const RTX_CODE code = GET_CODE (x);
2127 switch (code)
2128 {
2129 case LABEL_REF:
2130 case SYMBOL_REF:
2131 case CONST_INT:
2132 case CONST:
2133 case CONST_DOUBLE:
2134 case CONST_FIXED:
2135 case CONST_VECTOR:
2136 case CC0:
2137 case PC:
2138 case REG:
2139 case SCRATCH:
2140 case ADDR_VEC:
2141 case ADDR_DIFF_VEC:
2142 return 0;
2143
2144 case CLOBBER:
2145 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2146 when some combination can't be done. If we see one, don't think
2147 that we can simplify the expression. */
2148 return (GET_MODE (x) != VOIDmode);
2149
2150 case PRE_INC:
2151 case PRE_DEC:
2152 case POST_INC:
2153 case POST_DEC:
2154 case PRE_MODIFY:
2155 case POST_MODIFY:
2156 case CALL:
2157 case UNSPEC_VOLATILE:
2158 /* case TRAP_IF: This isn't clear yet. */
2159 return 1;
2160
2161 case MEM:
2162 case ASM_INPUT:
2163 case ASM_OPERANDS:
2164 if (MEM_VOLATILE_P (x))
2165 return 1;
2166
2167 default:
2168 break;
2169 }
2170
2171 /* Recursively scan the operands of this expression. */
2172
2173 {
2174 const char *fmt = GET_RTX_FORMAT (code);
2175 int i;
2176
2177 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2178 {
2179 if (fmt[i] == 'e')
2180 {
2181 if (side_effects_p (XEXP (x, i)))
2182 return 1;
2183 }
2184 else if (fmt[i] == 'E')
2185 {
2186 int j;
2187 for (j = 0; j < XVECLEN (x, i); j++)
2188 if (side_effects_p (XVECEXP (x, i, j)))
2189 return 1;
2190 }
2191 }
2192 }
2193 return 0;
2194 }
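
/* A minimal sketch contrasting the three predicates above.
   EXAMPLE_PREDICATES is a hypothetical helper added for exposition, not
   part of this file.  A POST_INC of a register has a side effect but
   involves nothing volatile, so only side_effects_p flags it.  */

static void
example_predicates (rtx reg)
{
  rtx inc = gen_rtx_POST_INC (GET_MODE (reg), reg);

  gcc_assert (side_effects_p (inc));	/* Auto-increment is a side effect.  */
  gcc_assert (!volatile_refs_p (inc));	/* But nothing volatile is involved.  */
  gcc_assert (!volatile_insn_p (inc));	/* And it is not a volatile insn.  */
}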
2195 \f
2196 /* Return nonzero if evaluating rtx X might cause a trap.
2197 FLAGS controls how to consider MEMs.  A nonzero value means the context
2198 of the access may have changed from the original, such that the
2199 address may have become invalid.  */
2200
2201 int
2202 may_trap_p_1 (const_rtx x, unsigned flags)
2203 {
2204 int i;
2205 enum rtx_code code;
2206 const char *fmt;
2207
2208 /* We make no distinction currently, but this function is part of
2209 the internal target-hooks ABI so we keep the parameter as
2210 "unsigned flags". */
2211 bool code_changed = flags != 0;
2212
2213 if (x == 0)
2214 return 0;
2215 code = GET_CODE (x);
2216 switch (code)
2217 {
2218 /* Handle these cases quickly. */
2219 case CONST_INT:
2220 case CONST_DOUBLE:
2221 case CONST_FIXED:
2222 case CONST_VECTOR:
2223 case SYMBOL_REF:
2224 case LABEL_REF:
2225 case CONST:
2226 case PC:
2227 case CC0:
2228 case REG:
2229 case SCRATCH:
2230 return 0;
2231
2232 case UNSPEC:
2233 case UNSPEC_VOLATILE:
2234 return targetm.unspec_may_trap_p (x, flags);
2235
2236 case ASM_INPUT:
2237 case TRAP_IF:
2238 return 1;
2239
2240 case ASM_OPERANDS:
2241 return MEM_VOLATILE_P (x);
2242
2243 /* Memory ref can trap unless it's a static var or a stack slot. */
2244 case MEM:
2245 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2246 reference; moving it out of context, such as when moving code
2247 while optimizing, might cause its address to become invalid.  */
2248 code_changed
2249 || !MEM_NOTRAP_P (x))
2250 {
2251 HOST_WIDE_INT size = MEM_SIZE (x) ? INTVAL (MEM_SIZE (x)) : 0;
2252 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2253 GET_MODE (x), code_changed);
2254 }
2255
2256 return 0;
2257
2258 /* Division by a non-constant might trap. */
2259 case DIV:
2260 case MOD:
2261 case UDIV:
2262 case UMOD:
2263 if (HONOR_SNANS (GET_MODE (x)))
2264 return 1;
2265 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2266 return flag_trapping_math;
2267 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2268 return 1;
2269 break;
2270
2271 case EXPR_LIST:
2272 /* An EXPR_LIST is used to represent a function call. This
2273 certainly may trap. */
2274 return 1;
2275
2276 case GE:
2277 case GT:
2278 case LE:
2279 case LT:
2280 case LTGT:
2281 case COMPARE:
2282 /* Some floating point comparisons may trap. */
2283 if (!flag_trapping_math)
2284 break;
2285 /* ??? There is no machine independent way to check for tests that trap
2286 when COMPARE is used, though many targets do make this distinction.
2287 For instance, sparc uses CCFPE for compares which generate exceptions
2288 and CCFP for compares which do not generate exceptions. */
2289 if (HONOR_NANS (GET_MODE (x)))
2290 return 1;
2291 /* But often the compare has some CC mode, so check operand
2292 modes as well. */
2293 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2294 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2295 return 1;
2296 break;
2297
2298 case EQ:
2299 case NE:
2300 if (HONOR_SNANS (GET_MODE (x)))
2301 return 1;
2302 /* Often comparison is CC mode, so check operand modes. */
2303 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2304 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2305 return 1;
2306 break;
2307
2308 case FIX:
2309 /* Conversion of floating point might trap. */
2310 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2311 return 1;
2312 break;
2313
2314 case NEG:
2315 case ABS:
2316 case SUBREG:
2317 /* These operations don't trap even with floating point. */
2318 break;
2319
2320 default:
2321 /* Any floating arithmetic may trap. */
2322 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2323 && flag_trapping_math)
2324 return 1;
2325 }
2326
2327 fmt = GET_RTX_FORMAT (code);
2328 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2329 {
2330 if (fmt[i] == 'e')
2331 {
2332 if (may_trap_p_1 (XEXP (x, i), flags))
2333 return 1;
2334 }
2335 else if (fmt[i] == 'E')
2336 {
2337 int j;
2338 for (j = 0; j < XVECLEN (x, i); j++)
2339 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2340 return 1;
2341 }
2342 }
2343 return 0;
2344 }
2345
2346 /* Return nonzero if evaluating rtx X might cause a trap. */
2347
2348 int
2349 may_trap_p (const_rtx x)
2350 {
2351 return may_trap_p_1 (x, 0);
2352 }
2353
2354 /* Same as above, but additionally return nonzero if evaluating rtx X might
2355 cause a fault.  We define a fault for the purpose of this function as an
2356 erroneous execution condition that cannot be encountered during the normal
2357 execution of a valid program; the typical example is an unaligned memory
2358 access on a strict alignment machine. The compiler guarantees that it
2359 doesn't generate code that will fault from a valid program, but this
2360 guarantee doesn't mean anything for individual instructions. Consider
2361 the following example:
2362
2363 struct S { int d; union { char *cp; int *ip; }; };
2364
2365 int foo(struct S *s)
2366 {
2367 if (s->d == 1)
2368 return *s->ip;
2369 else
2370 return *s->cp;
2371 }
2372
2373 on a strict alignment machine. In a valid program, foo will never be
2374 invoked on a structure for which d is equal to 1 and the underlying
2375 unique field of the union is not aligned on a 4-byte boundary, but the
2376 expression *s->ip might cause a fault if considered individually.
2377
2378 At the RTL level, potentially problematic expressions will almost always
2379 verify may_trap_p; for example, the above dereference can be emitted as
2380 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2381 However, suppose that foo is inlined in a caller that causes s->cp to
2382 point to a local character variable and guarantees that s->d is not set
2383 to 1; foo may have been effectively translated into pseudo-RTL as:
2384
2385 if ((reg:SI) == 1)
2386 (set (reg:SI) (mem:SI (%fp - 7)))
2387 else
2388 (set (reg:QI) (mem:QI (%fp - 7)))
2389
2390 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2391 memory reference to a stack slot, but it will certainly cause a fault
2392 on a strict alignment machine. */
2393
2394 int
2395 may_trap_or_fault_p (const_rtx x)
2396 {
2397 return may_trap_p_1 (x, 1);
2398 }
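
/* A minimal sketch of the difference between the two entry points above.
   EXAMPLE_TRAP_VS_FAULT is a hypothetical helper added for exposition, not
   part of this file.  A MEM through an arbitrary pseudo may trap either
   way, while a nonzero FLAGS value additionally distrusts MEM_NOTRAP_P,
   as when code is being moved out of its original context.  */

static int
example_trap_vs_fault (rtx addr_reg)
{
  rtx mem = gen_rtx_MEM (SImode, addr_reg);

  /* may_trap_p trusts MEM_NOTRAP_P; may_trap_or_fault_p does not.  */
  return may_trap_p (mem) || may_trap_or_fault_p (mem);
}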
2399 \f
2400 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2401 i.e., an inequality. */
2402
2403 int
2404 inequality_comparisons_p (const_rtx x)
2405 {
2406 const char *fmt;
2407 int len, i;
2408 const enum rtx_code code = GET_CODE (x);
2409
2410 switch (code)
2411 {
2412 case REG:
2413 case SCRATCH:
2414 case PC:
2415 case CC0:
2416 case CONST_INT:
2417 case CONST_DOUBLE:
2418 case CONST_FIXED:
2419 case CONST_VECTOR:
2420 case CONST:
2421 case LABEL_REF:
2422 case SYMBOL_REF:
2423 return 0;
2424
2425 case LT:
2426 case LTU:
2427 case GT:
2428 case GTU:
2429 case LE:
2430 case LEU:
2431 case GE:
2432 case GEU:
2433 return 1;
2434
2435 default:
2436 break;
2437 }
2438
2439 len = GET_RTX_LENGTH (code);
2440 fmt = GET_RTX_FORMAT (code);
2441
2442 for (i = 0; i < len; i++)
2443 {
2444 if (fmt[i] == 'e')
2445 {
2446 if (inequality_comparisons_p (XEXP (x, i)))
2447 return 1;
2448 }
2449 else if (fmt[i] == 'E')
2450 {
2451 int j;
2452 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2453 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2454 return 1;
2455 }
2456 }
2457
2458 return 0;
2459 }
2460 \f
2461 /* Replace any occurrence of FROM in X with TO.  The function does
2462 not descend into CONST_DOUBLE expressions when replacing.
2463
2464 Note that copying is not done, so X must not be shared unless all copies
2465 are to be modified.  */
2466
2467 rtx
2468 replace_rtx (rtx x, rtx from, rtx to)
2469 {
2470 int i, j;
2471 const char *fmt;
2472
2473 /* The following prevents loops from occurring when we change a MEM
2474 inside a CONST_DOUBLE into the same CONST_DOUBLE.  */
2475 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
2476 return x;
2477
2478 if (x == from)
2479 return to;
2480
2481 /* Allow this function to make replacements in EXPR_LISTs. */
2482 if (x == 0)
2483 return 0;
2484
2485 if (GET_CODE (x) == SUBREG)
2486 {
2487 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2488
2489 if (GET_CODE (new_rtx) == CONST_INT)
2490 {
2491 x = simplify_subreg (GET_MODE (x), new_rtx,
2492 GET_MODE (SUBREG_REG (x)),
2493 SUBREG_BYTE (x));
2494 gcc_assert (x);
2495 }
2496 else
2497 SUBREG_REG (x) = new_rtx;
2498
2499 return x;
2500 }
2501 else if (GET_CODE (x) == ZERO_EXTEND)
2502 {
2503 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2504
2505 if (GET_CODE (new_rtx) == CONST_INT)
2506 {
2507 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2508 new_rtx, GET_MODE (XEXP (x, 0)));
2509 gcc_assert (x);
2510 }
2511 else
2512 XEXP (x, 0) = new_rtx;
2513
2514 return x;
2515 }
2516
2517 fmt = GET_RTX_FORMAT (GET_CODE (x));
2518 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2519 {
2520 if (fmt[i] == 'e')
2521 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2522 else if (fmt[i] == 'E')
2523 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2524 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2525 }
2526
2527 return x;
2528 }
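
/* A minimal usage sketch.  EXAMPLE_SUBSTITUTE_REG is a hypothetical helper
   added for exposition, not part of this file.  Because replace_rtx
   modifies X in place, an unshared copy is made first.  */

static rtx
example_substitute_reg (rtx x, rtx old_reg, rtx new_reg)
{
  /* copy_rtx guards against clobbering expressions shared with X.  */
  return replace_rtx (copy_rtx (x), old_reg, new_reg);
}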
2529 \f
2530 /* Replace occurrences of the old label in *X with the new one.
2531 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2532
2533 int
2534 replace_label (rtx *x, void *data)
2535 {
2536 rtx l = *x;
2537 rtx old_label = ((replace_label_data *) data)->r1;
2538 rtx new_label = ((replace_label_data *) data)->r2;
2539 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2540
2541 if (l == NULL_RTX)
2542 return 0;
2543
2544 if (GET_CODE (l) == SYMBOL_REF
2545 && CONSTANT_POOL_ADDRESS_P (l))
2546 {
2547 rtx c = get_pool_constant (l);
2548 if (rtx_referenced_p (old_label, c))
2549 {
2550 rtx new_c, new_l;
2551 replace_label_data *d = (replace_label_data *) data;
2552
2553 /* Create a copy of constant C; replace the label inside
2554 but do not update LABEL_NUSES because uses in constant pool
2555 are not counted. */
2556 new_c = copy_rtx (c);
2557 d->update_label_nuses = false;
2558 for_each_rtx (&new_c, replace_label, data);
2559 d->update_label_nuses = update_label_nuses;
2560
2561 /* Add the new constant NEW_C to the constant pool and replace
2562 the old reference to the constant with the new reference.  */
2563 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2564 *x = replace_rtx (l, l, new_l);
2565 }
2566 return 0;
2567 }
2568
2569 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2570 field. This is not handled by for_each_rtx because it doesn't
2571 handle unprinted ('0') fields. */
2572 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2573 JUMP_LABEL (l) = new_label;
2574
2575 if ((GET_CODE (l) == LABEL_REF
2576 || GET_CODE (l) == INSN_LIST)
2577 && XEXP (l, 0) == old_label)
2578 {
2579 XEXP (l, 0) = new_label;
2580 if (update_label_nuses)
2581 {
2582 ++LABEL_NUSES (new_label);
2583 --LABEL_NUSES (old_label);
2584 }
2585 return 0;
2586 }
2587
2588 return 0;
2589 }
2590
2591 /* Return nonzero when *BODY is equal to X or when X is directly
2592 referenced by *BODY; FOR_EACH_RTX then stops traversing and returns
2593 nonzero too.  Otherwise FOR_EACH_RTX continues traversing *BODY.  */
2594
2595 static int
2596 rtx_referenced_p_1 (rtx *body, void *x)
2597 {
2598 rtx y = (rtx) x;
2599
2600 if (*body == NULL_RTX)
2601 return y == NULL_RTX;
2602
2603 /* Return true if a label_ref *BODY refers to label Y. */
2604 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2605 return XEXP (*body, 0) == y;
2606
2607 /* If *BODY is a reference to a pool constant, traverse the constant.  */
2608 if (GET_CODE (*body) == SYMBOL_REF
2609 && CONSTANT_POOL_ADDRESS_P (*body))
2610 return rtx_referenced_p (y, get_pool_constant (*body));
2611
2612 /* By default, compare the RTL expressions. */
2613 return rtx_equal_p (*body, y);
2614 }
2615
2616 /* Return true if X is referenced in BODY. */
2617
2618 int
2619 rtx_referenced_p (rtx x, rtx body)
2620 {
2621 return for_each_rtx (&body, rtx_referenced_p_1, x);
2622 }
2623
2624 /* If INSN is a tablejump, return true and store the label that precedes the
2625 jump table in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL.  */
2626
2627 bool
2628 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2629 {
2630 rtx label, table;
2631
2632 if (JUMP_P (insn)
2633 && (label = JUMP_LABEL (insn)) != NULL_RTX
2634 && (table = next_active_insn (label)) != NULL_RTX
2635 && JUMP_P (table)
2636 && (GET_CODE (PATTERN (table)) == ADDR_VEC
2637 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
2638 {
2639 if (labelp)
2640 *labelp = label;
2641 if (tablep)
2642 *tablep = table;
2643 return true;
2644 }
2645 return false;
2646 }
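
/* A minimal sketch of how the outputs of tablejump_p are typically
   consumed.  EXAMPLE_COUNT_TABLE_ENTRIES is a hypothetical helper added
   for exposition, not part of this file.  The dispatch vector is
   operand 0 of an ADDR_VEC and operand 1 of an ADDR_DIFF_VEC.  */

static int
example_count_table_entries (rtx insn)
{
  rtx label, table;

  if (!tablejump_p (insn, &label, &table))
    return 0;

  return GET_NUM_ELEM (XVEC (PATTERN (table),
			     GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC));
}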
2647
2648 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG, a MEM or
2649 a constant that is not in the constant pool and not in the condition
2650 of an IF_THEN_ELSE.  */
2651
2652 static int
2653 computed_jump_p_1 (const_rtx x)
2654 {
2655 const enum rtx_code code = GET_CODE (x);
2656 int i, j;
2657 const char *fmt;
2658
2659 switch (code)
2660 {
2661 case LABEL_REF:
2662 case PC:
2663 return 0;
2664
2665 case CONST:
2666 case CONST_INT:
2667 case CONST_DOUBLE:
2668 case CONST_FIXED:
2669 case CONST_VECTOR:
2670 case SYMBOL_REF:
2671 case REG:
2672 return 1;
2673
2674 case MEM:
2675 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2676 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2677
2678 case IF_THEN_ELSE:
2679 return (computed_jump_p_1 (XEXP (x, 1))
2680 || computed_jump_p_1 (XEXP (x, 2)));
2681
2682 default:
2683 break;
2684 }
2685
2686 fmt = GET_RTX_FORMAT (code);
2687 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2688 {
2689 if (fmt[i] == 'e'
2690 && computed_jump_p_1 (XEXP (x, i)))
2691 return 1;
2692
2693 else if (fmt[i] == 'E')
2694 for (j = 0; j < XVECLEN (x, i); j++)
2695 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2696 return 1;
2697 }
2698
2699 return 0;
2700 }
2701
2702 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2703
2704 Tablejumps and casesi insns are not considered indirect jumps;
2705 we can recognize them by a (use (label_ref)). */
2706
2707 int
2708 computed_jump_p (const_rtx insn)
2709 {
2710 int i;
2711 if (JUMP_P (insn))
2712 {
2713 rtx pat = PATTERN (insn);
2714
2715 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2716 if (JUMP_LABEL (insn) != NULL)
2717 return 0;
2718
2719 if (GET_CODE (pat) == PARALLEL)
2720 {
2721 int len = XVECLEN (pat, 0);
2722 int has_use_labelref = 0;
2723
2724 for (i = len - 1; i >= 0; i--)
2725 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2726 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2727 == LABEL_REF))
2728 has_use_labelref = 1;
2729
2730 if (! has_use_labelref)
2731 for (i = len - 1; i >= 0; i--)
2732 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2733 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2734 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2735 return 1;
2736 }
2737 else if (GET_CODE (pat) == SET
2738 && SET_DEST (pat) == pc_rtx
2739 && computed_jump_p_1 (SET_SRC (pat)))
2740 return 1;
2741 }
2742 return 0;
2743 }
2744
2745 /* Optimized worker loop for for_each_rtx, structured to avoid useless
2746 recursive calls.  Processes the subexpressions of EXP and passes them to F.  */
2747 static int
2748 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2749 {
2750 int result, i, j;
2751 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2752 rtx *x;
2753
2754 for (; format[n] != '\0'; n++)
2755 {
2756 switch (format[n])
2757 {
2758 case 'e':
2759 /* Call F on X. */
2760 x = &XEXP (exp, n);
2761 result = (*f) (x, data);
2762 if (result == -1)
2763 /* Do not traverse sub-expressions. */
2764 continue;
2765 else if (result != 0)
2766 /* Stop the traversal. */
2767 return result;
2768
2769 if (*x == NULL_RTX)
2770 /* There are no sub-expressions. */
2771 continue;
2772
2773 i = non_rtx_starting_operands[GET_CODE (*x)];
2774 if (i >= 0)
2775 {
2776 result = for_each_rtx_1 (*x, i, f, data);
2777 if (result != 0)
2778 return result;
2779 }
2780 break;
2781
2782 case 'V':
2783 case 'E':
2784 if (XVEC (exp, n) == 0)
2785 continue;
2786 for (j = 0; j < XVECLEN (exp, n); ++j)
2787 {
2788 /* Call F on X. */
2789 x = &XVECEXP (exp, n, j);
2790 result = (*f) (x, data);
2791 if (result == -1)
2792 /* Do not traverse sub-expressions. */
2793 continue;
2794 else if (result != 0)
2795 /* Stop the traversal. */
2796 return result;
2797
2798 if (*x == NULL_RTX)
2799 /* There are no sub-expressions. */
2800 continue;
2801
2802 i = non_rtx_starting_operands[GET_CODE (*x)];
2803 if (i >= 0)
2804 {
2805 result = for_each_rtx_1 (*x, i, f, data);
2806 if (result != 0)
2807 return result;
2808 }
2809 }
2810 break;
2811
2812 default:
2813 /* Nothing to do. */
2814 break;
2815 }
2816 }
2817
2818 return 0;
2819 }
2820
2821 /* Traverse X via depth-first search, calling F for each
2822 sub-expression (including X itself). F is also passed the DATA.
2823 If F returns -1, do not traverse sub-expressions, but continue
2824 traversing the rest of the tree. If F ever returns any other
2825 nonzero value, stop the traversal, and return the value returned
2826 by F. Otherwise, return 0. This function does not traverse inside
2827 tree structures that contain RTX_EXPRs, or into sub-expressions
2828 whose format code is `0', since it is not known whether or not those
2829 codes are actually RTL.
2830
2831 This routine is very general, and could (should?) be used to
2832 implement many of the other routines in this file. */
2833
2834 int
2835 for_each_rtx (rtx *x, rtx_function f, void *data)
2836 {
2837 int result;
2838 int i;
2839
2840 /* Call F on X. */
2841 result = (*f) (x, data);
2842 if (result == -1)
2843 /* Do not traverse sub-expressions. */
2844 return 0;
2845 else if (result != 0)
2846 /* Stop the traversal. */
2847 return result;
2848
2849 if (*x == NULL_RTX)
2850 /* There are no sub-expressions. */
2851 return 0;
2852
2853 i = non_rtx_starting_operands[GET_CODE (*x)];
2854 if (i < 0)
2855 return 0;
2856
2857 return for_each_rtx_1 (*x, i, f, data);
2858 }
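
/* A minimal sketch of a for_each_rtx callback.  COUNT_MEMS_1 and
   EXAMPLE_COUNT_MEMS are hypothetical helpers added for exposition, not
   part of this file.  Returning 0 keeps the traversal going over every
   sub-expression.  */

static int
count_mems_1 (rtx *x, void *data)
{
  if (*x && MEM_P (*x))
    ++*(int *) data;
  return 0;	/* Continue traversing.  */
}

static int
example_count_mems (rtx x)
{
  int count = 0;
  for_each_rtx (&x, count_mems_1, &count);
  return count;
}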
2859
2860
2861 /* Searches X for any reference to REGNO, returning the rtx of the
2862 reference found if any. Otherwise, returns NULL_RTX. */
2863
2864 rtx
2865 regno_use_in (unsigned int regno, rtx x)
2866 {
2867 const char *fmt;
2868 int i, j;
2869 rtx tem;
2870
2871 if (REG_P (x) && REGNO (x) == regno)
2872 return x;
2873
2874 fmt = GET_RTX_FORMAT (GET_CODE (x));
2875 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2876 {
2877 if (fmt[i] == 'e')
2878 {
2879 if ((tem = regno_use_in (regno, XEXP (x, i))))
2880 return tem;
2881 }
2882 else if (fmt[i] == 'E')
2883 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2884 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
2885 return tem;
2886 }
2887
2888 return NULL_RTX;
2889 }
2890
2891 /* Return a value indicating whether OP, an operand of a commutative
2892 operation, is preferred as the first or second operand.  The higher
2893 the value, the stronger the preference for being the first operand;
2894 negative values thus indicate a preference for OP to be the second
2895 operand and positive values for it to be the first.  */
2896
2897 int
2898 commutative_operand_precedence (rtx op)
2899 {
2900 enum rtx_code code = GET_CODE (op);
2901
2902 /* Constants always become the second operand.  Prefer "nice" constants.  */
2903 if (code == CONST_INT)
2904 return -8;
2905 if (code == CONST_DOUBLE)
2906 return -7;
2907 if (code == CONST_FIXED)
2908 return -7;
2909 op = avoid_constant_pool_reference (op);
2910 code = GET_CODE (op);
2911
2912 switch (GET_RTX_CLASS (code))
2913 {
2914 case RTX_CONST_OBJ:
2915 if (code == CONST_INT)
2916 return -6;
2917 if (code == CONST_DOUBLE)
2918 return -5;
2919 if (code == CONST_FIXED)
2920 return -5;
2921 return -4;
2922
2923 case RTX_EXTRA:
2924 /* SUBREGs of objects should come second. */
2925 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
2926 return -3;
2927 return 0;
2928
2929 case RTX_OBJ:
2930 /* Complex expressions should come first, so decrease the priority
2931 of objects.  Prefer pointer objects over non-pointer objects.  */
2932 if ((REG_P (op) && REG_POINTER (op))
2933 || (MEM_P (op) && MEM_POINTER (op)))
2934 return -1;
2935 return -2;
2936
2937 case RTX_COMM_ARITH:
2938 /* Prefer operands that are themselves commutative to be first.
2939 This helps to make things linear. In particular,
2940 (and (and (reg) (reg)) (not (reg))) is canonical. */
2941 return 4;
2942
2943 case RTX_BIN_ARITH:
2944 /* If only one operand is a binary expression, it will be the first
2945 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
2946 is canonical, although it will usually be further simplified. */
2947 return 2;
2948
2949 case RTX_UNARY:
2950 /* Then prefer NEG and NOT. */
2951 if (code == NEG || code == NOT)
2952 return 1;
2953
2954 default:
2955 return 0;
2956 }
2957 }
2958
2959 /* Return 1 iff it is necessary to swap the operands of a commutative
2960 operation in order to canonicalize the expression.  */
2961
2962 bool
2963 swap_commutative_operands_p (rtx x, rtx y)
2964 {
2965 return (commutative_operand_precedence (x)
2966 < commutative_operand_precedence (y));
2967 }
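
/* A minimal sketch of the usual caller-side idiom.  EXAMPLE_CANONICALIZE
   is a hypothetical helper added for exposition, not part of this file.
   After the swap, the operand with the higher precedence comes first, so
   e.g. a CONST_INT always ends up second.  */

static void
example_canonicalize (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    {
      rtx tem = *op0;
      *op0 = *op1;
      *op1 = tem;
    }
}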
2968
2969 /* Return 1 if X is an autoincrement side effect and the register is
2970 not the stack pointer. */
2971 int
2972 auto_inc_p (const_rtx x)
2973 {
2974 switch (GET_CODE (x))
2975 {
2976 case PRE_INC:
2977 case POST_INC:
2978 case PRE_DEC:
2979 case POST_DEC:
2980 case PRE_MODIFY:
2981 case POST_MODIFY:
2982 /* There are no REG_INC notes for SP. */
2983 if (XEXP (x, 0) != stack_pointer_rtx)
2984 return 1;
2985 default:
2986 break;
2987 }
2988 return 0;
2989 }
2990
2991 /* Return nonzero if IN contains a piece of rtl whose address is LOC.  */
2992 int
2993 loc_mentioned_in_p (rtx *loc, const_rtx in)
2994 {
2995 enum rtx_code code;
2996 const char *fmt;
2997 int i, j;
2998
2999 if (!in)
3000 return 0;
3001
3002 code = GET_CODE (in);
3003 fmt = GET_RTX_FORMAT (code);
3004 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3005 {
3006 if (fmt[i] == 'e')
3007 {
3008 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3009 return 1;
3010 }
3011 else if (fmt[i] == 'E')
3012 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3013 if (loc == &XVECEXP (in, i, j)
3014 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3015 return 1;
3016 }
3017 return 0;
3018 }
3019
3020 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3021 and SUBREG_BYTE, return the bit offset where the subreg begins
3022 (counting from the least significant bit of the operand). */
3023
3024 unsigned int
3025 subreg_lsb_1 (enum machine_mode outer_mode,
3026 enum machine_mode inner_mode,
3027 unsigned int subreg_byte)
3028 {
3029 unsigned int bitpos;
3030 unsigned int byte;
3031 unsigned int word;
3032
3033 /* A paradoxical subreg begins at bit position 0. */
3034 if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
3035 return 0;
3036
3037 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3038 /* If the subreg crosses a word boundary ensure that
3039 it also begins and ends on a word boundary. */
3040 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3041 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3042 && (subreg_byte % UNITS_PER_WORD
3043 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3044
3045 if (WORDS_BIG_ENDIAN)
3046 word = (GET_MODE_SIZE (inner_mode)
3047 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3048 else
3049 word = subreg_byte / UNITS_PER_WORD;
3050 bitpos = word * BITS_PER_WORD;
3051
3052 if (BYTES_BIG_ENDIAN)
3053 byte = (GET_MODE_SIZE (inner_mode)
3054 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3055 else
3056 byte = subreg_byte % UNITS_PER_WORD;
3057 bitpos += byte * BITS_PER_UNIT;
3058
3059 return bitpos;
3060 }
3061
3062 /* Given a subreg X, return the bit offset where the subreg begins
3063 (counting from the least significant bit of the reg). */
3064
3065 unsigned int
3066 subreg_lsb (const_rtx x)
3067 {
3068 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3069 SUBREG_BYTE (x));
3070 }
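
/* A worked sketch of the offset arithmetic above.  EXAMPLE_SUBREG_LSB is a
   hypothetical helper added for exposition, not part of this file.  On a
   fully little-endian target, (subreg:SI (reg:DI) 4) selects the high 32
   bits, so the subreg begins at bit 32 regardless of the word size.  */

static void
example_subreg_lsb (void)
{
  if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
    gcc_assert (subreg_lsb_1 (SImode, DImode, 4) == 32);
}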
3071
3072 /* Fill in information about a subreg of a hard register.
3073 xregno - A regno of an inner hard subreg_reg (or what will become one).
3074 xmode - The mode of xregno.
3075 offset - The byte offset.
3076 ymode - The mode of a top level SUBREG (or what may become one).
3077 info - Pointer to structure to fill in. */
3078 void
3079 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3080 unsigned int offset, enum machine_mode ymode,
3081 struct subreg_info *info)
3082 {
3083 int nregs_xmode, nregs_ymode;
3084 int mode_multiple, nregs_multiple;
3085 int offset_adj, y_offset, y_offset_adj;
3086 int regsize_xmode, regsize_ymode;
3087 bool rknown;
3088
3089 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3090
3091 rknown = false;
3092
3093 /* If there are holes in a non-scalar mode in registers, we expect
3094 that it is made up of its units concatenated together. */
3095 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3096 {
3097 enum machine_mode xmode_unit;
3098
3099 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3100 if (GET_MODE_INNER (xmode) == VOIDmode)
3101 xmode_unit = xmode;
3102 else
3103 xmode_unit = GET_MODE_INNER (xmode);
3104 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3105 gcc_assert (nregs_xmode
3106 == (GET_MODE_NUNITS (xmode)
3107 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3108 gcc_assert (hard_regno_nregs[xregno][xmode]
3109 == (hard_regno_nregs[xregno][xmode_unit]
3110 * GET_MODE_NUNITS (xmode)));
3111
3112 /* You can only ask for a SUBREG of a value with holes in the middle
3113 if you don't cross the holes. (Such a SUBREG should be done by
3114 picking a different register class, or doing it in memory if
3115 necessary.) An example of a value with holes is XCmode on 32-bit
3116 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3117 3 for each part, but in memory it's two 128-bit parts.
3118 Padding is assumed to be at the end (not necessarily the 'high part')
3119 of each unit. */
3120 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3121 < GET_MODE_NUNITS (xmode))
3122 && (offset / GET_MODE_SIZE (xmode_unit)
3123 != ((offset + GET_MODE_SIZE (ymode) - 1)
3124 / GET_MODE_SIZE (xmode_unit))))
3125 {
3126 info->representable_p = false;
3127 rknown = true;
3128 }
3129 }
3130 else
3131 nregs_xmode = hard_regno_nregs[xregno][xmode];
3132
3133 nregs_ymode = hard_regno_nregs[xregno][ymode];
3134
3135 /* Paradoxical subregs are otherwise valid. */
3136 if (!rknown
3137 && offset == 0
3138 && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
3139 {
3140 info->representable_p = true;
3141 /* If this is a big endian paradoxical subreg, which uses more
3142 actual hard registers than the original register, we must
3143 return a negative offset so that we find the proper highpart
3144 of the register. */
3145 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3146 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3147 info->offset = nregs_xmode - nregs_ymode;
3148 else
3149 info->offset = 0;
3150 info->nregs = nregs_ymode;
3151 return;
3152 }
3153
3154 /* If registers store different numbers of bits in the different
3155 modes, we cannot generally form this subreg. */
3156 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3157 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3158 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3159 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3160 {
3161 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3162 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3163 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3164 {
3165 info->representable_p = false;
3166 info->nregs
3167 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3168 info->offset = offset / regsize_xmode;
3169 return;
3170 }
3171 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3172 {
3173 info->representable_p = false;
3174 info->nregs
3175 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3176 info->offset = offset / regsize_xmode;
3177 return;
3178 }
3179 }
3180
3181 /* Lowpart subregs are otherwise valid. */
3182 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3183 {
3184 info->representable_p = true;
3185 rknown = true;
3186
3187 if (offset == 0 || nregs_xmode == nregs_ymode)
3188 {
3189 info->offset = 0;
3190 info->nregs = nregs_ymode;
3191 return;
3192 }
3193 }
3194
3195 /* This should always pass, otherwise we don't know how to verify
3196 the constraint. These conditions may be relaxed but
3197 subreg_regno_offset would need to be redesigned. */
3198 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3199 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3200
3201 /* The XMODE value can be seen as a vector of NREGS_XMODE
3202 values.  The subreg must represent a lowpart of a given field.
3203 Compute which field it is.  */
3204 offset_adj = offset;
3205 offset_adj -= subreg_lowpart_offset (ymode,
3206 mode_for_size (GET_MODE_BITSIZE (xmode)
3207 / nregs_xmode,
3208 MODE_INT, 0));
3209
3210 /* Size of ymode must not be greater than the size of xmode. */
3211 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3212 gcc_assert (mode_multiple != 0);
3213
3214 y_offset = offset / GET_MODE_SIZE (ymode);
3215 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3216 nregs_multiple = nregs_xmode / nregs_ymode;
3217
3218 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3219 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3220
3221 if (!rknown)
3222 {
3223 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3224 rknown = true;
3225 }
3226 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3227 info->nregs = nregs_ymode;
3228 }
3229
3230 /* This function returns the regno offset of a subreg expression.
3231 xregno - A regno of an inner hard subreg_reg (or what will become one).
3232 xmode - The mode of xregno.
3233 offset - The byte offset.
3234 ymode - The mode of a top level SUBREG (or what may become one).
3235 RETURN - The regno offset which would be used. */
3236 unsigned int
3237 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3238 unsigned int offset, enum machine_mode ymode)
3239 {
3240 struct subreg_info info;
3241 subreg_get_info (xregno, xmode, offset, ymode, &info);
3242 return info.offset;
3243 }
3244
3245 /* This function returns true when the offset is representable via
3246 subreg_offset in the given regno.
3247 xregno - A regno of an inner hard subreg_reg (or what will become one).
3248 xmode - The mode of xregno.
3249 offset - The byte offset.
3250 ymode - The mode of a top level SUBREG (or what may become one).
3251 RETURN - Whether the offset is representable. */
3252 bool
3253 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3254 unsigned int offset, enum machine_mode ymode)
3255 {
3256 struct subreg_info info;
3257 subreg_get_info (xregno, xmode, offset, ymode, &info);
3258 return info.representable_p;
3259 }
3260
3261 /* Return the number of a YMODE register to which
3262
3263 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3264
3265 can be simplified. Return -1 if the subreg can't be simplified.
3266
3267 XREGNO is a hard register number. */
3268
3269 int
3270 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3271 unsigned int offset, enum machine_mode ymode)
3272 {
3273 struct subreg_info info;
3274 unsigned int yregno;
3275
3276 #ifdef CANNOT_CHANGE_MODE_CLASS
3277 /* Give the backend a chance to disallow the mode change. */
3278 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3279 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3280 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
3281 return -1;
3282 #endif
3283
3284 /* We shouldn't simplify stack-related registers. */
3285 if ((!reload_completed || frame_pointer_needed)
3286 && (xregno == FRAME_POINTER_REGNUM
3287 || xregno == HARD_FRAME_POINTER_REGNUM))
3288 return -1;
3289
3290 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3291 && xregno == ARG_POINTER_REGNUM)
3292 return -1;
3293
3294 if (xregno == STACK_POINTER_REGNUM)
3295 return -1;
3296
3297 /* Try to get the register offset. */
3298 subreg_get_info (xregno, xmode, offset, ymode, &info);
3299 if (!info.representable_p)
3300 return -1;
3301
3302 /* Make sure that the offsetted register value is in range. */
3303 yregno = xregno + info.offset;
3304 if (!HARD_REGISTER_NUM_P (yregno))
3305 return -1;
3306
3307 /* See whether (reg:YMODE YREGNO) is valid.
3308
3309 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3310 This is a kludge to work around how float/complex arguments are passed
3311 on 32-bit SPARC and should be fixed. */
3312 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3313 && HARD_REGNO_MODE_OK (xregno, xmode))
3314 return -1;
3315
3316 return (int) yregno;
3317 }
3318
3319 /* Return the final regno that a subreg expression refers to. */
3320 unsigned int
3321 subreg_regno (const_rtx x)
3322 {
3323 unsigned int ret;
3324 rtx subreg = SUBREG_REG (x);
3325 int regno = REGNO (subreg);
3326
3327 ret = regno + subreg_regno_offset (regno,
3328 GET_MODE (subreg),
3329 SUBREG_BYTE (x),
3330 GET_MODE (x));
3331 return ret;
3332
3333 }
3334
3335 /* Return the number of registers that a subreg expression refers
3336 to. */
3337 unsigned int
3338 subreg_nregs (const_rtx x)
3339 {
3340 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3341 }
3342
3343 /* Return the number of registers that the subreg expression X, whose
3344 inner register has number REGNO, refers to.  This is a variant of
3345 subreg_nregs above in which the regno can be passed in.  */
3346
3347 unsigned int
3348 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3349 {
3350 struct subreg_info info;
3351 rtx subreg = SUBREG_REG (x);
3352
3353 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3354 &info);
3355 return info.nregs;
3356 }
3357
3358
3359 struct parms_set_data
3360 {
3361 int nregs;
3362 HARD_REG_SET regs;
3363 };
3364
3365 /* Helper function for noticing stores to parameter registers. */
3366 static void
3367 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3368 {
3369 struct parms_set_data *const d = (struct parms_set_data *) data;
3370 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3371 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3372 {
3373 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3374 d->nregs--;
3375 }
3376 }
3377
3378 /* Look backward for the first parameter to be loaded.
3379 Note that loads of all parameters will not necessarily be
3380 found if CSE has eliminated some of them (e.g., an argument
3381 to the outer function is passed down as a parameter).
3382 Do not skip BOUNDARY. */
3383 rtx
3384 find_first_parameter_load (rtx call_insn, rtx boundary)
3385 {
3386 struct parms_set_data parm;
3387 rtx p, before, first_set;
3388
3389 /* Since different machines initialize their parameter registers
3390 in different orders, assume nothing. Collect the set of all
3391 parameter registers. */
3392 CLEAR_HARD_REG_SET (parm.regs);
3393 parm.nregs = 0;
3394 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3395 if (GET_CODE (XEXP (p, 0)) == USE
3396 && REG_P (XEXP (XEXP (p, 0), 0)))
3397 {
3398 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3399
3400 /* We only care about registers which can hold function
3401 arguments. */
3402 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3403 continue;
3404
3405 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3406 parm.nregs++;
3407 }
3408 before = call_insn;
3409 first_set = call_insn;
3410
3411 /* Search backward for the first set of a register in this set. */
3412 while (parm.nregs && before != boundary)
3413 {
3414 before = PREV_INSN (before);
3415
3416 /* It is possible that some loads got CSEed from one call to
3417 another. Stop in that case. */
3418 if (CALL_P (before))
3419 break;
3420
3421 /* Our caller must either ensure that we will find all sets
3422 (in case code has not been optimized yet), or take care
3423 of possible labels by setting BOUNDARY to the preceding
3424 CODE_LABEL.  */
3425 if (LABEL_P (before))
3426 {
3427 gcc_assert (before == boundary);
3428 break;
3429 }
3430
3431 if (INSN_P (before))
3432 {
3433 int nregs_old = parm.nregs;
3434 note_stores (PATTERN (before), parms_set, &parm);
3435 /* If we found something that did not set a parameter reg,
3436 we're done. Do not keep going, as that might result
3437 in hoisting an insn before the setting of a pseudo
3438 that is used by the hoisted insn. */
3439 if (nregs_old != parm.nregs)
3440 first_set = before;
3441 else
3442 break;
3443 }
3444 }
3445 return first_set;
3446 }
3447
3448 /* Return true if we should avoid inserting code between INSN and the
3449 preceding call instruction.  */
3450
3451 bool
3452 keep_with_call_p (const_rtx insn)
3453 {
3454 rtx set;
3455
3456 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3457 {
3458 if (REG_P (SET_DEST (set))
3459 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3460 && fixed_regs[REGNO (SET_DEST (set))]
3461 && general_operand (SET_SRC (set), VOIDmode))
3462 return true;
3463 if (REG_P (SET_SRC (set))
3464 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
3465 && REG_P (SET_DEST (set))
3466 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3467 return true;
3468 /* There may be a stack pop just after the call and before the store
3469 of the return register. Search for the actual store when deciding
3470 if we can break or not. */
3471 if (SET_DEST (set) == stack_pointer_rtx)
3472 {
3473 /* This CONST_CAST is okay because next_nonnote_insn just
3474 returns its argument and we assign it to a const_rtx
3475 variable. */
3476 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3477 if (i2 && keep_with_call_p (i2))
3478 return true;
3479 }
3480 }
3481 return false;
3482 }
3483
3484 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3485 to non-complex jumps. That is, direct unconditional, conditional,
3486 and tablejumps, but not computed jumps or returns. It also does
3487 not apply to the fallthru case of a conditional jump. */
3488
3489 bool
3490 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3491 {
3492 rtx tmp = JUMP_LABEL (jump_insn);
3493
3494 if (label == tmp)
3495 return true;
3496
3497 if (tablejump_p (jump_insn, NULL, &tmp))
3498 {
3499 rtvec vec = XVEC (PATTERN (tmp),
3500 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3501 int i, veclen = GET_NUM_ELEM (vec);
3502
3503 for (i = 0; i < veclen; ++i)
3504 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3505 return true;
3506 }
3507
3508 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3509 return true;
3510
3511 return false;
3512 }
3513
3514 \f
3515 /* Return an estimate of the cost of computing rtx X.
3516 One use is in cse, to decide which expression to keep in the hash table.
3517 Another is in rtl generation, to pick the cheapest way to multiply.
3518 Other uses like the latter are expected in the future.
3519
3520 The SPEED parameter specifies whether costs optimized for speed or for
3521 size should be returned.  */
3522
3523 int
3524 rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED, bool speed)
3525 {
3526 int i, j;
3527 enum rtx_code code;
3528 const char *fmt;
3529 int total;
3530
3531 if (x == 0)
3532 return 0;
3533
3534 /* Compute the default costs of certain things.
3535 Note that targetm.rtx_costs can override the defaults. */
3536
3537 code = GET_CODE (x);
3538 switch (code)
3539 {
3540 case MULT:
3541 total = COSTS_N_INSNS (5);
3542 break;
3543 case DIV:
3544 case UDIV:
3545 case MOD:
3546 case UMOD:
3547 total = COSTS_N_INSNS (7);
3548 break;
3549 case USE:
3550 /* Used in combine.c as a marker. */
3551 total = 0;
3552 break;
3553 default:
3554 total = COSTS_N_INSNS (1);
3555 }
3556
3557 switch (code)
3558 {
3559 case REG:
3560 return 0;
3561
3562 case SUBREG:
3563 total = 0;
3564 /* If we can't tie these modes, make this expensive. The larger
3565 the mode, the more expensive it is. */
3566 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3567 return COSTS_N_INSNS (2
3568 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
3569 break;
3570
3571 default:
3572 if (targetm.rtx_costs (x, code, outer_code, &total, speed))
3573 return total;
3574 break;
3575 }
3576
3577 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3578 which is already in total. */
3579
3580 fmt = GET_RTX_FORMAT (code);
3581 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3582 if (fmt[i] == 'e')
3583 total += rtx_cost (XEXP (x, i), code, speed);
3584 else if (fmt[i] == 'E')
3585 for (j = 0; j < XVECLEN (x, i); j++)
3586 total += rtx_cost (XVECEXP (x, i, j), code, speed);
3587
3588 return total;
3589 }
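
/* A minimal sketch comparing two candidate sequences by cost.
   EXAMPLE_PREFER_SHIFT_ADD is a hypothetical helper added for exposition,
   not part of this file.  This is the kind of query expansion code makes
   when picking the cheapest way to multiply by a constant such as
   9 = (x << 3) + x.  */

static bool
example_prefer_shift_add (rtx reg, bool speed)
{
  rtx mult = gen_rtx_MULT (SImode, reg, GEN_INT (9));
  rtx shift_add = gen_rtx_PLUS (SImode,
				gen_rtx_ASHIFT (SImode, reg, GEN_INT (3)),
				reg);

  return rtx_cost (shift_add, SET, speed) < rtx_cost (mult, SET, speed);
}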
3590 \f
3591 /* Return the cost of address expression X.
3592 Expect that X is a properly formed address reference.
3593
3594 The SPEED parameter specifies whether costs optimized for speed or for
3595 size should be returned.  */
3596
3597 int
3598 address_cost (rtx x, enum machine_mode mode, bool speed)
3599 {
3600 /* We may be asked for the cost of various unusual addresses, such as the
3601 operand of a push instruction.  It is not worthwhile to complicate the
3602 target hook with such cases.  */
3603
3604 if (!memory_address_p (mode, x))
3605 return 1000;
3606
3607 return targetm.address_cost (x, speed);
3608 }
3609
3610 /* If the target doesn't override, compute the cost as with arithmetic. */
3611
3612 int
3613 default_address_cost (rtx x, bool speed)
3614 {
3615 return rtx_cost (x, MEM, speed);
3616 }
3617 \f
3618
3619 unsigned HOST_WIDE_INT
3620 nonzero_bits (const_rtx x, enum machine_mode mode)
3621 {
3622 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3623 }
3624
3625 unsigned int
3626 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3627 {
3628 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3629 }
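
/* A minimal sketch of what these queries can prove.  EXAMPLE_MASKED_BITS
   is a hypothetical helper added for exposition, not part of this file.
   Masking with 0xff leaves at most the low eight bits nonzero, whatever
   is known about REG.  */

static void
example_masked_bits (rtx reg)
{
  rtx masked = gen_rtx_AND (SImode, reg, GEN_INT (0xff));

  gcc_assert ((nonzero_bits (masked, SImode)
	       & ~(unsigned HOST_WIDE_INT) 0xff) == 0);
}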
3630
3631 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3632 It avoids exponential behavior in nonzero_bits1 when X has
3633 identical subexpressions on the first or the second level. */
3634
3635 static unsigned HOST_WIDE_INT
3636 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3637 enum machine_mode known_mode,
3638 unsigned HOST_WIDE_INT known_ret)
3639 {
3640 if (x == known_x && mode == known_mode)
3641 return known_ret;
3642
3643 /* Try to find identical subexpressions.  If found, call
3644 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3645 precomputed value for the subexpression as KNOWN_RET. */
3646
3647 if (ARITHMETIC_P (x))
3648 {
3649 rtx x0 = XEXP (x, 0);
3650 rtx x1 = XEXP (x, 1);
3651
3652 /* Check the first level. */
3653 if (x0 == x1)
3654 return nonzero_bits1 (x, mode, x0, mode,
3655 cached_nonzero_bits (x0, mode, known_x,
3656 known_mode, known_ret));
3657
3658 /* Check the second level. */
3659 if (ARITHMETIC_P (x0)
3660 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3661 return nonzero_bits1 (x, mode, x1, mode,
3662 cached_nonzero_bits (x1, mode, known_x,
3663 known_mode, known_ret));
3664
3665 if (ARITHMETIC_P (x1)
3666 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3667 return nonzero_bits1 (x, mode, x0, mode,
3668 cached_nonzero_bits (x0, mode, known_x,
3669 known_mode, known_ret));
3670 }
3671
3672 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3673 }
3674
3675 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3676 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3677 is less useful. We can't allow both, because that results in exponential
3678 run time recursion. There is a nullstone testcase that triggered
3679 this. This macro avoids accidental uses of num_sign_bit_copies. */
3680 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3681
3682 /* Given an expression, X, compute which bits in X can be nonzero.
3683 We don't care about bits outside of those defined in MODE.
3684
3685 For most X this is simply GET_MODE_MASK (MODE), but if X is
3686 an arithmetic operation, we can do better. */
3687
3688 static unsigned HOST_WIDE_INT
3689 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3690 enum machine_mode known_mode,
3691 unsigned HOST_WIDE_INT known_ret)
3692 {
3693 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3694 unsigned HOST_WIDE_INT inner_nz;
3695 enum rtx_code code;
3696 unsigned int mode_width = GET_MODE_BITSIZE (mode);
3697
3698 /* For floating-point and vector values, assume all bits are needed. */
3699 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3700 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3701 return nonzero;
3702
3703 /* If X is wider than MODE, use its mode instead. */
3704 if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
3705 {
3706 mode = GET_MODE (x);
3707 nonzero = GET_MODE_MASK (mode);
3708 mode_width = GET_MODE_BITSIZE (mode);
3709 }
3710
3711 if (mode_width > HOST_BITS_PER_WIDE_INT)
3712 /* Our only callers in this case look for single bit values. So
3713 just return the mode mask. Those tests will then be false. */
3714 return nonzero;
3715
3716 #ifndef WORD_REGISTER_OPERATIONS
3717 /* If MODE is wider than X, but both are a single word for both the host
3718 and target machines, we can compute this from which bits of the
3719 object might be nonzero in its own mode, taking into account the fact
3720 that on many CISC machines, accessing an object in a wider mode
3721 causes the high-order bits to become undefined. So they are
3722 not known to be zero. */
3723
3724 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3725 && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
3726 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3727 && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
3728 {
3729 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3730 known_x, known_mode, known_ret);
3731 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3732 return nonzero;
3733 }
3734 #endif
3735
3736 code = GET_CODE (x);
3737 switch (code)
3738 {
3739 case REG:
3740 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3741 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3742 all the bits above ptr_mode are known to be zero. */
3743 if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3744 && REG_POINTER (x))
3745 nonzero &= GET_MODE_MASK (ptr_mode);
3746 #endif
3747
3748 /* Include declared information about alignment of pointers. */
3749 /* ??? We don't properly preserve REG_POINTER changes across
3750 pointer-to-integer casts, so we can't trust it except for
3751 things that we know must be pointers. See execute/960116-1.c. */
3752 if ((x == stack_pointer_rtx
3753 || x == frame_pointer_rtx
3754 || x == arg_pointer_rtx)
3755 && REGNO_POINTER_ALIGN (REGNO (x)))
3756 {
3757 unsigned HOST_WIDE_INT alignment
3758 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3759
3760 #ifdef PUSH_ROUNDING
3761 /* If PUSH_ROUNDING is defined, it is possible for the
3762 stack to be momentarily aligned only to that amount,
3763 so we pick the least alignment. */
3764 if (x == stack_pointer_rtx && PUSH_ARGS)
3765 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
3766 alignment);
3767 #endif
3768
3769 nonzero &= ~(alignment - 1);
3770 }
3771
3772 {
3773 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
3774 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
3775 known_mode, known_ret,
3776 &nonzero_for_hook);
3777
3778 if (new_rtx)
3779 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
3780 known_mode, known_ret);
3781
3782 return nonzero_for_hook;
3783 }
3784
3785 case CONST_INT:
3786 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3787 /* If X is negative in MODE, sign-extend the value. */
3788 if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
3789 && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
3790 return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
3791 #endif
3792
3793 return INTVAL (x);
3794
3795 case MEM:
3796 #ifdef LOAD_EXTEND_OP
3797 /* On many, if not most, RISC machines, reading a byte from memory
3798 zeros the rest of the register. Noticing that fact saves a lot
3799 of extra zero-extends. */
3800 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
3801 nonzero &= GET_MODE_MASK (GET_MODE (x));
3802 #endif
3803 break;
3804
3805 case EQ: case NE:
3806 case UNEQ: case LTGT:
3807 case GT: case GTU: case UNGT:
3808 case LT: case LTU: case UNLT:
3809 case GE: case GEU: case UNGE:
3810 case LE: case LEU: case UNLE:
3811 case UNORDERED: case ORDERED:
3812 /* If this produces an integer result, we know which bits are set.
3813 Code here used to clear bits outside the mode of X, but that is
3814 now done above. */
3815 /* Mind that MODE is the mode the caller wants to look at this
3816 operation in, and not the actual operation mode. We can wind
3817 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3818 that describes the results of a vector compare. */
3819 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3820 && mode_width <= HOST_BITS_PER_WIDE_INT)
3821 nonzero = STORE_FLAG_VALUE;
3822 break;
3823
3824 case NEG:
3825 #if 0
3826 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3827 and num_sign_bit_copies. */
3828 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3829 == GET_MODE_BITSIZE (GET_MODE (x)))
3830 nonzero = 1;
3831 #endif
3832
3833 if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
3834 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
3835 break;
3836
3837 case ABS:
3838 #if 0
3839 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3840 and num_sign_bit_copies. */
3841 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3842 == GET_MODE_BITSIZE (GET_MODE (x)))
3843 nonzero = 1;
3844 #endif
3845 break;
3846
3847 case TRUNCATE:
3848 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
3849 known_x, known_mode, known_ret)
3850 & GET_MODE_MASK (mode));
3851 break;
3852
3853 case ZERO_EXTEND:
3854 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3855 known_x, known_mode, known_ret);
3856 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3857 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3858 break;
3859
3860 case SIGN_EXTEND:
3861 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3862 Otherwise, show all the bits in the outer mode but not the inner
3863 may be nonzero. */
3864 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
3865 known_x, known_mode, known_ret);
3866 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3867 {
3868 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3869 if (inner_nz
3870 & (((HOST_WIDE_INT) 1
3871 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
3872 inner_nz |= (GET_MODE_MASK (mode)
3873 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
3874 }
3875
3876 nonzero &= inner_nz;
3877 break;
3878
3879 case AND:
3880 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3881 known_x, known_mode, known_ret)
3882 & cached_nonzero_bits (XEXP (x, 1), mode,
3883 known_x, known_mode, known_ret);
3884 break;
3885
3886 case XOR: case IOR:
3887 case UMIN: case UMAX: case SMIN: case SMAX:
3888 {
3889 unsigned HOST_WIDE_INT nonzero0 =
3890 cached_nonzero_bits (XEXP (x, 0), mode,
3891 known_x, known_mode, known_ret);
3892
3893 /* Don't call nonzero_bits a second time if it cannot change
3894 anything. */
3895 if ((nonzero & nonzero0) != nonzero)
3896 nonzero &= nonzero0
3897 | cached_nonzero_bits (XEXP (x, 1), mode,
3898 known_x, known_mode, known_ret);
3899 }
3900 break;
3901
3902 case PLUS: case MINUS:
3903 case MULT:
3904 case DIV: case UDIV:
3905 case MOD: case UMOD:
3906 /* We can apply the rules of arithmetic to compute the number of
3907 high- and low-order zero bits of these operations. We start by
3908 computing the width (position of the highest-order nonzero bit)
3909 and the number of low-order zero bits for each value. */
3910 {
3911 unsigned HOST_WIDE_INT nz0 =
3912 cached_nonzero_bits (XEXP (x, 0), mode,
3913 known_x, known_mode, known_ret);
3914 unsigned HOST_WIDE_INT nz1 =
3915 cached_nonzero_bits (XEXP (x, 1), mode,
3916 known_x, known_mode, known_ret);
3917 int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
3918 int width0 = floor_log2 (nz0) + 1;
3919 int width1 = floor_log2 (nz1) + 1;
3920 int low0 = floor_log2 (nz0 & -nz0);
3921 int low1 = floor_log2 (nz1 & -nz1);
3922 HOST_WIDE_INT op0_maybe_minusp
3923 = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
3924 HOST_WIDE_INT op1_maybe_minusp
3925 = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
3926 unsigned int result_width = mode_width;
3927 int result_low = 0;
3928
3929 switch (code)
3930 {
3931 case PLUS:
3932 result_width = MAX (width0, width1) + 1;
3933 result_low = MIN (low0, low1);
3934 break;
3935 case MINUS:
3936 result_low = MIN (low0, low1);
3937 break;
3938 case MULT:
3939 result_width = width0 + width1;
3940 result_low = low0 + low1;
3941 break;
3942 case DIV:
3943 if (width1 == 0)
3944 break;
3945 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3946 result_width = width0;
3947 break;
3948 case UDIV:
3949 if (width1 == 0)
3950 break;
3951 result_width = width0;
3952 break;
3953 case MOD:
3954 if (width1 == 0)
3955 break;
3956 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3957 result_width = MIN (width0, width1);
3958 result_low = MIN (low0, low1);
3959 break;
3960 case UMOD:
3961 if (width1 == 0)
3962 break;
3963 result_width = MIN (width0, width1);
3964 result_low = MIN (low0, low1);
3965 break;
3966 default:
3967 gcc_unreachable ();
3968 }
3969
3970 if (result_width < mode_width)
3971 nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
3972
3973 if (result_low > 0)
3974 nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
3975
3976 #ifdef POINTERS_EXTEND_UNSIGNED
3977 /* If pointers extend unsigned and this is an addition or subtraction
3978 to a pointer in Pmode, all the bits above ptr_mode are known to be
3979 zero. */
3980 if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
3981 && (code == PLUS || code == MINUS)
3982 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
3983 nonzero &= GET_MODE_MASK (ptr_mode);
3984 #endif
3985 }
3986 break;
3987
3988 case ZERO_EXTRACT:
3989 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3990 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3991 nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
3992 break;
3993
3994 case SUBREG:
3995 /* If this is a SUBREG formed for a promoted variable that has
3996 been zero-extended, we know that at least the high-order bits
3997 are zero, though others might be too. */
3998
3999 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4000 nonzero = GET_MODE_MASK (GET_MODE (x))
4001 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4002 known_x, known_mode, known_ret);
4003
4004 /* If the inner mode is a single word for both the host and target
4005 machines, we can compute this from which bits of the inner
4006 object might be nonzero. */
4007 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
4008 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
4009 <= HOST_BITS_PER_WIDE_INT))
4010 {
4011 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4012 known_x, known_mode, known_ret);
4013
4014 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4015 /* If this is a typical RISC machine, we only have to worry
4016 about the way loads are extended. */
4017 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4018 ? (((nonzero
4019 & (((unsigned HOST_WIDE_INT) 1
4020 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
4021 != 0))
4022 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
4023 || !MEM_P (SUBREG_REG (x)))
4024 #endif
4025 {
4026 /* On many CISC machines, accessing an object in a wider mode
4027 causes the high-order bits to become undefined. So they are
4028 not known to be zero. */
4029 if (GET_MODE_SIZE (GET_MODE (x))
4030 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4031 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4032 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
4033 }
4034 }
4035 break;
4036
4037 case ASHIFTRT:
4038 case LSHIFTRT:
4039 case ASHIFT:
4040 case ROTATE:
4041 /* The nonzero bits are in two classes: any bits within MODE
4042 that aren't in GET_MODE (x) are always significant. The rest of the
4043 nonzero bits are those that are significant in the operand of
4044 the shift when shifted the appropriate number of bits. This
4045 shows that high-order bits are cleared by the right shift and
4046 low-order bits by left shifts. */
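	 /* E.g. if X is (lshiftrt:SI Y 4) and Y's nonzero bits are
	    0x0000fff0, the only bits that can survive the shift are
	    0x00000fff.  */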
4047 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4048 && INTVAL (XEXP (x, 1)) >= 0
4049 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4050 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
4051 {
4052 enum machine_mode inner_mode = GET_MODE (x);
4053 unsigned int width = GET_MODE_BITSIZE (inner_mode);
4054 int count = INTVAL (XEXP (x, 1));
4055 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4056 unsigned HOST_WIDE_INT op_nonzero =
4057 cached_nonzero_bits (XEXP (x, 0), mode,
4058 known_x, known_mode, known_ret);
4059 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4060 unsigned HOST_WIDE_INT outer = 0;
4061
4062 if (mode_width > width)
4063 outer = (op_nonzero & nonzero & ~mode_mask);
4064
4065 if (code == LSHIFTRT)
4066 inner >>= count;
4067 else if (code == ASHIFTRT)
4068 {
4069 inner >>= count;
4070
4071 /* If the sign bit may have been nonzero before the shift, we
4072 need to mark all the places it could have been copied to
4073 by the shift as possibly nonzero. */
4074 if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
4075 inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
4076 }
4077 else if (code == ASHIFT)
4078 inner <<= count;
4079 else
4080 inner = ((inner << (count % width)
4081 | (inner >> (width - (count % width)))) & mode_mask);
4082
4083 nonzero &= (outer | inner);
4084 }
4085 break;
4086
4087 case FFS:
4088 case POPCOUNT:
4089 /* This is at most the number of bits in the mode. */
4090 nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4091 break;
4092
4093 case CLZ:
4094 /* If CLZ has a known value at zero, then the nonzero bits are
4095 that value, plus the number of bits in the mode minus one. */
4096 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4097 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4098 else
4099 nonzero = -1;
4100 break;
4101
4102 case CTZ:
4103 /* If CTZ has a known value at zero, then the nonzero bits are
4104 that value, plus the number of bits in the mode minus one. */
4105 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4106 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4107 else
4108 nonzero = -1;
4109 break;
4110
4111 case PARITY:
4112 nonzero = 1;
4113 break;
4114
4115 case IF_THEN_ELSE:
4116 {
4117 unsigned HOST_WIDE_INT nonzero_true =
4118 cached_nonzero_bits (XEXP (x, 1), mode,
4119 known_x, known_mode, known_ret);
4120
4121 	  /* Don't call nonzero_bits a second time if it cannot change
4122 anything. */
4123 if ((nonzero & nonzero_true) != nonzero)
4124 nonzero &= nonzero_true
4125 | cached_nonzero_bits (XEXP (x, 2), mode,
4126 known_x, known_mode, known_ret);
4127 }
4128 break;
4129
4130 default:
4131 break;
4132 }
4133
4134 return nonzero;
4135 }
4136
4137 /* See the macro definition above. */
4138 #undef cached_num_sign_bit_copies
4139
4140 \f
4141 /* The function cached_num_sign_bit_copies is a wrapper around
4142 num_sign_bit_copies1. It avoids exponential behavior in
4143 num_sign_bit_copies1 when X has identical subexpressions on the
4144 first or the second level. */
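/* For example, in (plus (reg A) (reg A)) both operands are the same rtx,
   so the value for (reg A) is computed once and passed down as
   KNOWN_X/KNOWN_RET instead of being recomputed along both branches of
   the walk.  */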
4145
4146 static unsigned int
4147 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4148 enum machine_mode known_mode,
4149 unsigned int known_ret)
4150 {
4151 if (x == known_x && mode == known_mode)
4152 return known_ret;
4153
4154 /* Try to find identical subexpressions. If found call
4155 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4156 the precomputed value for the subexpression as KNOWN_RET. */
4157
4158 if (ARITHMETIC_P (x))
4159 {
4160 rtx x0 = XEXP (x, 0);
4161 rtx x1 = XEXP (x, 1);
4162
4163 /* Check the first level. */
4164 if (x0 == x1)
4165 return
4166 num_sign_bit_copies1 (x, mode, x0, mode,
4167 cached_num_sign_bit_copies (x0, mode, known_x,
4168 known_mode,
4169 known_ret));
4170
4171 /* Check the second level. */
4172 if (ARITHMETIC_P (x0)
4173 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4174 return
4175 num_sign_bit_copies1 (x, mode, x1, mode,
4176 cached_num_sign_bit_copies (x1, mode, known_x,
4177 known_mode,
4178 known_ret));
4179
4180 if (ARITHMETIC_P (x1)
4181 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4182 return
4183 num_sign_bit_copies1 (x, mode, x0, mode,
4184 cached_num_sign_bit_copies (x0, mode, known_x,
4185 known_mode,
4186 known_ret));
4187 }
4188
4189 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4190 }
4191
4192 /* Return the number of bits at the high-order end of X that are known to
4193 be equal to the sign bit. X will be used in mode MODE; if MODE is
4194 VOIDmode, X will be used in its own mode. The returned value will always
4195 be between 1 and the number of bits in MODE. */
4196
4197 static unsigned int
4198 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4199 enum machine_mode known_mode,
4200 unsigned int known_ret)
4201 {
4202 enum rtx_code code = GET_CODE (x);
4203 unsigned int bitwidth = GET_MODE_BITSIZE (mode);
4204 int num0, num1, result;
4205 unsigned HOST_WIDE_INT nonzero;
4206
4207 /* If we weren't given a mode, use the mode of X. If the mode is still
4208 VOIDmode, we don't know anything. Likewise if one of the modes is
4209 floating-point. */
4210
4211 if (mode == VOIDmode)
4212 mode = GET_MODE (x);
4213
4214 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4215 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4216 return 1;
4217
4218 /* For a smaller object, just ignore the high bits. */
4219 if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
4220 {
4221 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4222 known_x, known_mode, known_ret);
4223 return MAX (1,
4224 num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
4225 }
4226
4227 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
4228 {
4229 #ifndef WORD_REGISTER_OPERATIONS
4230 /* If this machine does not do all register operations on the entire
4231 register and MODE is wider than the mode of X, we can say nothing
4232 at all about the high-order bits. */
4233 return 1;
4234 #else
4235 /* Likewise on machines that do, if the mode of the object is smaller
4236 than a word and loads of that size don't sign extend, we can say
4237 nothing about the high order bits. */
4238 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
4239 #ifdef LOAD_EXTEND_OP
4240 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4241 #endif
4242 )
4243 return 1;
4244 #endif
4245 }
4246
4247 switch (code)
4248 {
4249 case REG:
4250
4251 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4252 /* If pointers extend signed and this is a pointer in Pmode, say that
4253 all the bits above ptr_mode are known to be sign bit copies. */
4254 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
4255 && REG_POINTER (x))
4256 return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
4257 #endif
4258
4259 {
4260 unsigned int copies_for_hook = 1, copies = 1;
4261 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4262 known_mode, known_ret,
4263 &copies_for_hook);
4264
4265 if (new_rtx)
4266 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4267 known_mode, known_ret);
4268
4269 if (copies > 1 || copies_for_hook > 1)
4270 return MAX (copies, copies_for_hook);
4271
4272 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4273 }
4274 break;
4275
4276 case MEM:
4277 #ifdef LOAD_EXTEND_OP
4278 /* Some RISC machines sign-extend all loads of smaller than a word. */
4279 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4280 return MAX (1, ((int) bitwidth
4281 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
4282 #endif
4283 break;
4284
4285 case CONST_INT:
4286 /* If the constant is negative, take its 1's complement and remask.
4287 Then see how many zero bits we have. */
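      /* E.g. in SImode, -4 is 0xfffffffc; its masked complement is 3,
	 floor_log2 (3) is 1, so 32 - 1 - 1 = 30 high-order bits equal
	 the sign bit.  */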
4288 nonzero = INTVAL (x) & GET_MODE_MASK (mode);
4289 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4290 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4291 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4292
4293 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4294
4295 case SUBREG:
4296 /* If this is a SUBREG for a promoted object that is sign-extended
4297 and we are looking at it in a wider mode, we know that at least the
4298 high-order bits are known to be sign bit copies. */
4299
4300 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4301 {
4302 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4303 known_x, known_mode, known_ret);
4304 return MAX ((int) bitwidth
4305 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
4306 num0);
4307 }
4308
4309 /* For a smaller object, just ignore the high bits. */
4310 if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
4311 {
4312 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4313 known_x, known_mode, known_ret);
4314 return MAX (1, (num0
4315 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
4316 - bitwidth)));
4317 }
4318
4319 #ifdef WORD_REGISTER_OPERATIONS
4320 #ifdef LOAD_EXTEND_OP
4321 /* For paradoxical SUBREGs on machines where all register operations
4322 affect the entire register, just look inside. Note that we are
4323 passing MODE to the recursive call, so the number of sign bit copies
4324 will remain relative to that mode, not the inner mode. */
4325
4326 /* This works only if loads sign extend. Otherwise, if we get a
4327 reload for the inner part, it may be loaded from the stack, and
4328 then we lose all sign bit copies that existed before the store
4329 to the stack. */
4330
4331 if ((GET_MODE_SIZE (GET_MODE (x))
4332 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4333 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4334 && MEM_P (SUBREG_REG (x)))
4335 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4336 known_x, known_mode, known_ret);
4337 #endif
4338 #endif
4339 break;
4340
4341 case SIGN_EXTRACT:
4342 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4343 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4344 break;
4345
4346 case SIGN_EXTEND:
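      /* E.g. (sign_extend:SI (reg:QI X)) has at least 32 - 8 + 1 = 25
	 sign bit copies on a 32-bit target.  */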
4347 return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4348 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4349 known_x, known_mode, known_ret));
4350
4351 case TRUNCATE:
4352 /* For a smaller object, just ignore the high bits. */
4353 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4354 known_x, known_mode, known_ret);
4355 return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4356 - bitwidth)));
4357
4358 case NOT:
4359 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4360 known_x, known_mode, known_ret);
4361
4362 case ROTATE: case ROTATERT:
4363 /* If we are rotating left by a number of bits less than the number
4364 of sign bit copies, we can just subtract that amount from the
4365 number. */
4366 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4367 && INTVAL (XEXP (x, 1)) >= 0
4368 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4369 {
4370 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4371 known_x, known_mode, known_ret);
4372 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4373 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4374 }
4375 break;
4376
4377 case NEG:
4378 /* In general, this subtracts one sign bit copy. But if the value
4379 is known to be positive, the number of sign bit copies is the
4380 same as that of the input. Finally, if the input has just one bit
4381 that might be nonzero, all the bits are copies of the sign bit. */
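      /* E.g. if X can only be 0 or 1 (nonzero_bits == 1), then (neg X)
	 is 0 or -1, every bit of which matches the sign bit.  */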
4382 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4383 known_x, known_mode, known_ret);
4384 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4385 return num0 > 1 ? num0 - 1 : 1;
4386
4387 nonzero = nonzero_bits (XEXP (x, 0), mode);
4388 if (nonzero == 1)
4389 return bitwidth;
4390
4391 if (num0 > 1
4392 && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4393 num0--;
4394
4395 return num0;
4396
4397 case IOR: case AND: case XOR:
4398 case SMIN: case SMAX: case UMIN: case UMAX:
4399 /* Logical operations will preserve the number of sign-bit copies.
4400 MIN and MAX operations always return one of the operands. */
4401 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4402 known_x, known_mode, known_ret);
4403 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4404 known_x, known_mode, known_ret);
4405
4406 	  /* If the second operand is a constant that clears some of the top
4407 	     bits then, regardless of the other term, we are guaranteed at
4408 	     least that many high-order zero bits, i.e. sign bit copies.  */
4409 if (code == AND
4410 && num1 > 1
4411 && bitwidth <= HOST_BITS_PER_WIDE_INT
4412 && GET_CODE (XEXP (x, 1)) == CONST_INT
4413 && !(INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
4414 return num1;
4415
4416 /* Similarly for IOR when setting high-order bits. */
4417 if (code == IOR
4418 && num1 > 1
4419 && bitwidth <= HOST_BITS_PER_WIDE_INT
4420 && GET_CODE (XEXP (x, 1)) == CONST_INT
4421 && (INTVAL (XEXP (x, 1)) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))))
4422 return num1;
4423
4424 return MIN (num0, num1);
4425
4426 case PLUS: case MINUS:
4427 /* For addition and subtraction, we can have a 1-bit carry. However,
4428 if we are subtracting 1 from a positive number, there will not
4429 be such a carry. Furthermore, if the positive number is known to
4430 be 0 or 1, we know the result is either -1 or 0. */
4431
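      /* E.g. if X is known to be 0 or 1, (plus X -1) is -1 or 0, so all
	 BITWIDTH bits are copies of the sign bit.  */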
4432 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4433 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4434 {
4435 nonzero = nonzero_bits (XEXP (x, 0), mode);
4436 if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4437 return (nonzero == 1 || nonzero == 0 ? bitwidth
4438 : bitwidth - floor_log2 (nonzero) - 1);
4439 }
4440
4441 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4442 known_x, known_mode, known_ret);
4443 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4444 known_x, known_mode, known_ret);
4445 result = MAX (1, MIN (num0, num1) - 1);
4446
4447 #ifdef POINTERS_EXTEND_UNSIGNED
4448 /* If pointers extend signed and this is an addition or subtraction
4449 to a pointer in Pmode, all the bits above ptr_mode are known to be
4450 sign bit copies. */
4451 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4452 && (code == PLUS || code == MINUS)
4453 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
4454 result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
4455 - GET_MODE_BITSIZE (ptr_mode) + 1),
4456 result);
4457 #endif
4458 return result;
4459
4460 case MULT:
4461 /* The number of bits of the product is the sum of the number of
4462 	 bits of both terms. However, unless one of the terms is known
4463 to be positive, we must allow for an additional bit since negating
4464 a negative number can remove one sign bit copy. */
4465
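      /* E.g. with bitwidth 32, num0 = 20 and num1 = 20, the product keeps
	 32 - 12 - 12 = 8 sign bit copies, one fewer if both operands
	 might be negative.  */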
4466 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4467 known_x, known_mode, known_ret);
4468 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4469 known_x, known_mode, known_ret);
4470
4471 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4472 if (result > 0
4473 && (bitwidth > HOST_BITS_PER_WIDE_INT
4474 || (((nonzero_bits (XEXP (x, 0), mode)
4475 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4476 && ((nonzero_bits (XEXP (x, 1), mode)
4477 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
4478 result--;
4479
4480 return MAX (1, result);
4481
4482 case UDIV:
4483 /* The result must be <= the first operand. If the first operand
4484 has the high bit set, we know nothing about the number of sign
4485 bit copies. */
4486 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4487 return 1;
4488 else if ((nonzero_bits (XEXP (x, 0), mode)
4489 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4490 return 1;
4491 else
4492 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4493 known_x, known_mode, known_ret);
4494
4495 case UMOD:
4496 /* The result must be <= the second operand. */
4497 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4498 known_x, known_mode, known_ret);
4499
4500 case DIV:
4501 	 /* Similar to unsigned division, except that we have to worry about
4502 	    the case where the divisor is negative, in which case negating
4503 	    the quotient can remove one sign bit copy.  */
4504 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4505 known_x, known_mode, known_ret);
4506 if (result > 1
4507 && (bitwidth > HOST_BITS_PER_WIDE_INT
4508 || (nonzero_bits (XEXP (x, 1), mode)
4509 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4510 result--;
4511
4512 return result;
4513
4514 case MOD:
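      /* The magnitude of the result is bounded by the second operand, so
	 it has at least as many sign bit copies; allow for one fewer when
	 that operand might be negative.  */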
4515 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4516 known_x, known_mode, known_ret);
4517 if (result > 1
4518 && (bitwidth > HOST_BITS_PER_WIDE_INT
4519 || (nonzero_bits (XEXP (x, 1), mode)
4520 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4521 result--;
4522
4523 return result;
4524
4525 case ASHIFTRT:
4526 /* Shifts by a constant add to the number of bits equal to the
4527 sign bit. */
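      /* E.g. (ashiftrt:SI X 8) has eight more sign bit copies than X,
	 capped at the mode's bitwidth.  */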
4528 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4529 known_x, known_mode, known_ret);
4530 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4531 && INTVAL (XEXP (x, 1)) > 0
4532 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
4533 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4534
4535 return num0;
4536
4537 case ASHIFT:
4538 /* Left shifts destroy copies. */
4539 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4540 || INTVAL (XEXP (x, 1)) < 0
4541 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4542 || INTVAL (XEXP (x, 1)) >= GET_MODE_BITSIZE (GET_MODE (x)))
4543 return 1;
4544
4545 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4546 known_x, known_mode, known_ret);
4547 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4548
4549 case IF_THEN_ELSE:
4550 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4551 known_x, known_mode, known_ret);
4552 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4553 known_x, known_mode, known_ret);
4554 return MIN (num0, num1);
4555
4556 case EQ: case NE: case GE: case GT: case LE: case LT:
4557 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4558 case GEU: case GTU: case LEU: case LTU:
4559 case UNORDERED: case ORDERED:
4560 	 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
4561 Then see how many zero bits we have. */
4562 nonzero = STORE_FLAG_VALUE;
4563 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4564 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4565 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4566
4567 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4568
4569 default:
4570 break;
4571 }
4572
4573 /* If we haven't been able to figure it out by one of the above rules,
4574 see if some of the high-order bits are known to be zero. If so,
4575 count those bits and return one less than that amount. If we can't
4576 safely compute the mask for this mode, always return BITWIDTH. */
4577
4578 bitwidth = GET_MODE_BITSIZE (mode);
4579 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4580 return 1;
4581
4582 nonzero = nonzero_bits (x, mode);
4583 return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
4584 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4585 }
4586
4587 /* Calculate the rtx_cost of a single instruction. A return value of
4588 zero indicates an instruction pattern without a known cost. */
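/* E.g. for a PARALLEL containing one SET plus CLOBBERs the cost is that
   of the SET's source; a pattern with two SETs is considered too complex
   and yields 0.  */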
4589
4590 int
4591 insn_rtx_cost (rtx pat, bool speed)
4592 {
4593 int i, cost;
4594 rtx set;
4595
4596 /* Extract the single set rtx from the instruction pattern.
4597 We can't use single_set since we only have the pattern. */
4598 if (GET_CODE (pat) == SET)
4599 set = pat;
4600 else if (GET_CODE (pat) == PARALLEL)
4601 {
4602 set = NULL_RTX;
4603 for (i = 0; i < XVECLEN (pat, 0); i++)
4604 {
4605 rtx x = XVECEXP (pat, 0, i);
4606 if (GET_CODE (x) == SET)
4607 {
4608 if (set)
4609 return 0;
4610 set = x;
4611 }
4612 }
4613 if (!set)
4614 return 0;
4615 }
4616 else
4617 return 0;
4618
4619 cost = rtx_cost (SET_SRC (set), SET, speed);
4620 return cost > 0 ? cost : COSTS_N_INSNS (1);
4621 }
4622
4623 /* Given an insn INSN and condition COND, return the condition in a
4624 canonical form to simplify testing by callers. Specifically:
4625
4626 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4627 (2) Both operands will be machine operands; (cc0) will have been replaced.
4628 (3) If an operand is a constant, it will be the second operand.
4629 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4630 for GE, GEU, and LEU.
4631
4632 If the condition cannot be understood, or is an inequality floating-point
4633 comparison which needs to be reversed, 0 will be returned.
4634
4635 	 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4636
4637 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4638 insn used in locating the condition was found. If a replacement test
4639 of the condition is desired, it should be placed in front of that
4640 insn and we will be sure that the inputs are still valid.
4641
4642 If WANT_REG is nonzero, we wish the condition to be relative to that
4643 register, if possible. Therefore, do not canonicalize the condition
4644 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4645 to be a compare to a CC mode register.
4646
4647 	 If VALID_AT_INSN_P is nonzero, the condition must be valid both at
4648 	 *EARLIEST and at INSN. */
4649
4650 rtx
4651 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4652 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4653 {
4654 enum rtx_code code;
4655 rtx prev = insn;
4656 const_rtx set;
4657 rtx tem;
4658 rtx op0, op1;
4659 int reverse_code = 0;
4660 enum machine_mode mode;
4661 basic_block bb = BLOCK_FOR_INSN (insn);
4662
4663 code = GET_CODE (cond);
4664 mode = GET_MODE (cond);
4665 op0 = XEXP (cond, 0);
4666 op1 = XEXP (cond, 1);
4667
4668 if (reverse)
4669 code = reversed_comparison_code (cond, insn);
4670 if (code == UNKNOWN)
4671 return 0;
4672
4673 if (earliest)
4674 *earliest = insn;
4675
4676 /* If we are comparing a register with zero, see if the register is set
4677 in the previous insn to a COMPARE or a comparison operation. Perform
4678 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4679 	 in cse.c.  */
4680
4681 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4682 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4683 && op1 == CONST0_RTX (GET_MODE (op0))
4684 && op0 != want_reg)
4685 {
4686 /* Set nonzero when we find something of interest. */
4687 rtx x = 0;
4688
4689 #ifdef HAVE_cc0
4690 /* If comparison with cc0, import actual comparison from compare
4691 insn. */
4692 if (op0 == cc0_rtx)
4693 {
4694 if ((prev = prev_nonnote_insn (prev)) == 0
4695 || !NONJUMP_INSN_P (prev)
4696 || (set = single_set (prev)) == 0
4697 || SET_DEST (set) != cc0_rtx)
4698 return 0;
4699
4700 op0 = SET_SRC (set);
4701 op1 = CONST0_RTX (GET_MODE (op0));
4702 if (earliest)
4703 *earliest = prev;
4704 }
4705 #endif
4706
4707 /* If this is a COMPARE, pick up the two things being compared. */
4708 if (GET_CODE (op0) == COMPARE)
4709 {
4710 op1 = XEXP (op0, 1);
4711 op0 = XEXP (op0, 0);
4712 continue;
4713 }
4714 else if (!REG_P (op0))
4715 break;
4716
4717 /* Go back to the previous insn. Stop if it is not an INSN. We also
4718 stop if it isn't a single set or if it has a REG_INC note because
4719 we don't want to bother dealing with it. */
4720
4721 if ((prev = prev_nonnote_insn (prev)) == 0
4722 || !NONJUMP_INSN_P (prev)
4723 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4724 /* In cfglayout mode, there do not have to be labels at the
4725 beginning of a block, or jumps at the end, so the previous
4726 	     conditions would not stop us when we reach the bb boundary. */
4727 || BLOCK_FOR_INSN (prev) != bb)
4728 break;
4729
4730 set = set_of (op0, prev);
4731
4732 if (set
4733 && (GET_CODE (set) != SET
4734 || !rtx_equal_p (SET_DEST (set), op0)))
4735 break;
4736
4737 /* If this is setting OP0, get what it sets it to if it looks
4738 relevant. */
4739 if (set)
4740 {
4741 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4742 #ifdef FLOAT_STORE_FLAG_VALUE
4743 REAL_VALUE_TYPE fsfv;
4744 #endif
4745
4746 /* ??? We may not combine comparisons done in a CCmode with
4747 comparisons not done in a CCmode. This is to aid targets
4748 like Alpha that have an IEEE compliant EQ instruction, and
4749 a non-IEEE compliant BEQ instruction. The use of CCmode is
4750 actually artificial, simply to prevent the combination, but
4751 should not affect other platforms.
4752
4753 However, we must allow VOIDmode comparisons to match either
4754 CCmode or non-CCmode comparison, because some ports have
4755 modeless comparisons inside branch patterns.
4756
4757 ??? This mode check should perhaps look more like the mode check
4758 in simplify_comparison in combine. */
4759
4760 if ((GET_CODE (SET_SRC (set)) == COMPARE
4761 || (((code == NE
4762 || (code == LT
4763 && GET_MODE_CLASS (inner_mode) == MODE_INT
4764 && (GET_MODE_BITSIZE (inner_mode)
4765 <= HOST_BITS_PER_WIDE_INT)
4766 && (STORE_FLAG_VALUE
4767 & ((HOST_WIDE_INT) 1
4768 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4769 #ifdef FLOAT_STORE_FLAG_VALUE
4770 || (code == LT
4771 && SCALAR_FLOAT_MODE_P (inner_mode)
4772 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4773 REAL_VALUE_NEGATIVE (fsfv)))
4774 #endif
4775 ))
4776 && COMPARISON_P (SET_SRC (set))))
4777 && (((GET_MODE_CLASS (mode) == MODE_CC)
4778 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4779 || mode == VOIDmode || inner_mode == VOIDmode))
4780 x = SET_SRC (set);
4781 else if (((code == EQ
4782 || (code == GE
4783 && (GET_MODE_BITSIZE (inner_mode)
4784 <= HOST_BITS_PER_WIDE_INT)
4785 && GET_MODE_CLASS (inner_mode) == MODE_INT
4786 && (STORE_FLAG_VALUE
4787 & ((HOST_WIDE_INT) 1
4788 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4789 #ifdef FLOAT_STORE_FLAG_VALUE
4790 || (code == GE
4791 && SCALAR_FLOAT_MODE_P (inner_mode)
4792 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4793 REAL_VALUE_NEGATIVE (fsfv)))
4794 #endif
4795 ))
4796 && COMPARISON_P (SET_SRC (set))
4797 && (((GET_MODE_CLASS (mode) == MODE_CC)
4798 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4799 || mode == VOIDmode || inner_mode == VOIDmode))
4800
4801 {
4802 reverse_code = 1;
4803 x = SET_SRC (set);
4804 }
4805 else
4806 break;
4807 }
4808
4809 else if (reg_set_p (op0, prev))
4810 /* If this sets OP0, but not directly, we have to give up. */
4811 break;
4812
4813 if (x)
4814 {
4815 /* If the caller is expecting the condition to be valid at INSN,
4816 make sure X doesn't change before INSN. */
4817 if (valid_at_insn_p)
4818 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
4819 break;
4820 if (COMPARISON_P (x))
4821 code = GET_CODE (x);
4822 if (reverse_code)
4823 {
4824 code = reversed_comparison_code (x, prev);
4825 if (code == UNKNOWN)
4826 return 0;
4827 reverse_code = 0;
4828 }
4829
4830 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
4831 if (earliest)
4832 *earliest = prev;
4833 }
4834 }
4835
4836 /* If constant is first, put it last. */
4837 if (CONSTANT_P (op0))
4838 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
4839
4840 /* If OP0 is the result of a comparison, we weren't able to find what
4841 was really being compared, so fail. */
4842 if (!allow_cc_mode
4843 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4844 return 0;
4845
4846 /* Canonicalize any ordered comparison with integers involving equality
4847 if we can do computations in the relevant mode and we do not
4848 overflow. */
4849
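  /* E.g. (le X 4) becomes (lt X 5) and (geu X 3) becomes (gtu X 2),
     provided the adjusted constant does not wrap around in the mode
     of X.  */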
4850 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
4851 && GET_CODE (op1) == CONST_INT
4852 && GET_MODE (op0) != VOIDmode
4853 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
4854 {
4855 HOST_WIDE_INT const_val = INTVAL (op1);
4856 unsigned HOST_WIDE_INT uconst_val = const_val;
4857 unsigned HOST_WIDE_INT max_val
4858 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
4859
4860 switch (code)
4861 {
4862 case LE:
4863 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
4864 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
4865 break;
4866
4867 /* When cross-compiling, const_val might be sign-extended from
4868 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
4869 case GE:
4870 if ((HOST_WIDE_INT) (const_val & max_val)
4871 != (((HOST_WIDE_INT) 1
4872 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
4873 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
4874 break;
4875
4876 case LEU:
4877 if (uconst_val < max_val)
4878 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
4879 break;
4880
4881 case GEU:
4882 if (uconst_val != 0)
4883 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
4884 break;
4885
4886 default:
4887 break;
4888 }
4889 }
4890
4891 /* Never return CC0; return zero instead. */
4892 if (CC0_P (op0))
4893 return 0;
4894
4895 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4896 }
4897
4898 /* Given a jump insn JUMP, return the condition that will cause it to branch
4899 to its JUMP_LABEL. If the condition cannot be understood, or is an
4900 inequality floating-point comparison which needs to be reversed, 0 will
4901 be returned.
4902
4903 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4904 insn used in locating the condition was found. If a replacement test
4905 of the condition is desired, it should be placed in front of that
4906 insn and we will be sure that the inputs are still valid. If EARLIEST
4907 is null, the returned condition will be valid at INSN.
4908
4909 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
4910 compare CC mode register.
4911
4912 VALID_AT_INSN_P is the same as for canonicalize_condition. */
4913
4914 rtx
4915 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
4916 {
4917 rtx cond;
4918 int reverse;
4919 rtx set;
4920
4921 /* If this is not a standard conditional jump, we can't parse it. */
4922 if (!JUMP_P (jump)
4923 || ! any_condjump_p (jump))
4924 return 0;
4925 set = pc_set (jump);
4926
4927 cond = XEXP (SET_SRC (set), 0);
4928
4929 /* If this branches to JUMP_LABEL when the condition is false, reverse
4930 the condition. */
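  /* I.e. for (set (pc) (if_then_else COND (pc) (label_ref L))) the
     branch to L is taken when COND is false.  */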
4931 reverse
4932 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4933 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
4934
4935 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
4936 allow_cc_mode, valid_at_insn_p);
4937 }
4938
4939 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
4940 TARGET_MODE_REP_EXTENDED.
4941
4942 Note that we assume that the property of
4943 	 TARGET_MODE_REP_EXTENDED(B, C) carries over to all integral modes
4944 	 narrower than mode B.  I.e., if A is a mode narrower than B, then in
4945 order to be able to operate on it in mode B, mode A needs to
4946 satisfy the requirements set by the representation of mode B. */
4947
4948 static void
4949 init_num_sign_bit_copies_in_rep (void)
4950 {
4951 enum machine_mode mode, in_mode;
4952
4953 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
4954        in_mode = GET_MODE_WIDER_MODE (in_mode))
4955 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
4956 mode = GET_MODE_WIDER_MODE (mode))
4957 {
4958 enum machine_mode i;
4959
4960 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
4961 	 extends at most to the next wider mode. */
4962 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
4963 || GET_MODE_WIDER_MODE (mode) == in_mode);
4964
4965 /* We are in in_mode. Count how many bits outside of mode
4966 have to be copies of the sign-bit. */
4967 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
4968 {
4969 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
4970
4971 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
4972 /* We can only check sign-bit copies starting from the
4973 top-bit. In order to be able to check the bits we
4974 have already seen we pretend that subsequent bits
4975 have to be sign-bit copies too. */
4976 || num_sign_bit_copies_in_rep [in_mode][mode])
4977 num_sign_bit_copies_in_rep [in_mode][mode]
4978 += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
4979 }
4980 }
4981 }
4982
4983 /* Suppose that truncation from the machine mode of X to MODE is not a
4984 no-op. See if there is anything special about X so that we can
4985 assume it already contains a truncated value of MODE. */
4986
4987 bool
4988 truncated_to_mode (enum machine_mode mode, const_rtx x)
4989 {
4990 /* This register has already been used in MODE without explicit
4991 truncation. */
4992 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
4993 return true;
4994
4995 /* See if we already satisfy the requirements of MODE. If yes we
4996 can just switch to MODE. */
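  /* E.g. on a target that keeps SImode values sign-extended in DImode
     registers, num_sign_bit_copies_in_rep[DImode][SImode] is 32, so a
     DImode X with at least 33 sign bit copies already satisfies the
     SImode representation.  */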
4997 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
4998 && (num_sign_bit_copies (x, GET_MODE (x))
4999 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5000 return true;
5001
5002 return false;
5003 }
5004 \f
5005 /* Initialize non_rtx_starting_operands, which is used to speed up
5006 for_each_rtx. */
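/* E.g. SET has format "ee", so its entry is 0; CONST_INT has format "w"
   and gets -1.  */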
5007 void
5008 init_rtlanal (void)
5009 {
5010 int i;
5011 for (i = 0; i < NUM_RTX_CODE; i++)
5012 {
5013 const char *format = GET_RTX_FORMAT (i);
5014 const char *first = strpbrk (format, "eEV");
5015 non_rtx_starting_operands[i] = first ? first - format : -1;
5016 }
5017
5018 init_num_sign_bit_copies_in_rep ();
5019 }
5020 \f
5021 /* Check whether this is a constant pool constant. */
5022 bool
5023 constant_pool_constant_p (rtx x)
5024 {
5025 x = avoid_constant_pool_reference (x);
5026 return GET_CODE (x) == CONST_DOUBLE;
5027 }
5028