gcc/rtlanal.c
1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "hard-reg-set.h"
29 #include "rtl.h"
30 #include "insn-config.h"
31 #include "recog.h"
32 #include "target.h"
33 #include "output.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "regs.h"
37 #include "function.h"
38 #include "df.h"
39 #include "tree.h"
40 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
41 #include "addresses.h"
42
43 /* Forward declarations */
44 static void set_of_1 (rtx, const_rtx, void *);
45 static bool covers_regno_p (const_rtx, unsigned int);
46 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
47 static int rtx_referenced_p_1 (rtx *, void *);
48 static int computed_jump_p_1 (const_rtx);
49 static void parms_set (rtx, const_rtx, void *);
50
51 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
52 const_rtx, enum machine_mode,
53 unsigned HOST_WIDE_INT);
54 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
55 const_rtx, enum machine_mode,
56 unsigned HOST_WIDE_INT);
57 static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
58 enum machine_mode,
59 unsigned int);
60 static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
61 enum machine_mode, unsigned int);
62
63 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
64 -1 if a code has no such operand. */
65 static int non_rtx_starting_operands[NUM_RTX_CODE];
66
67 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
68 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
69 SIGN_EXTEND then while narrowing we also have to enforce the
70 representation and sign-extend the value to mode DESTINATION_REP.
71
72 If the value is already sign-extended to DESTINATION_REP mode we
73 can just switch to DESTINATION mode on it. For each pair of
74 integral modes SOURCE and DESTINATION, when truncating from SOURCE
75 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
76 contains the number of high-order bits in SOURCE that have to be
77 copies of the sign-bit so that we can do this mode-switch to
78 DESTINATION. */
79
80 static unsigned int
81 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
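
/* Illustrative example (hypothetical target, not from the original
   source): on a machine where SImode values are always kept
   sign-extended in DImode registers, i.e.
   TARGET_MODE_REP_EXTENDED (SImode, DImode) returns SIGN_EXTEND,
   num_sign_bit_copies_in_rep[DImode][SImode] records how many
   high-order bits of a DImode value must already be copies of the
   sign bit before a truncation to SImode can be treated as a simple
   mode change.  */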
82 \f
83 /* Return 1 if the value of X is unstable
84 (would be different at a different point in the program).
85 The frame pointer, arg pointer, etc. are considered stable
86 (within one function) and so is anything marked `unchanging'. */
87
88 int
89 rtx_unstable_p (const_rtx x)
90 {
91 const RTX_CODE code = GET_CODE (x);
92 int i;
93 const char *fmt;
94
95 switch (code)
96 {
97 case MEM:
98 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
99
100 case CONST:
101 CASE_CONST_ANY:
102 case SYMBOL_REF:
103 case LABEL_REF:
104 return 0;
105
106 case REG:
107 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
108 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
109 /* The arg pointer varies if it is not a fixed register. */
110 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
111 return 0;
112 /* ??? When call-clobbered, the value is stable modulo the restore
113 that must happen after a call. This currently screws up local-alloc
114 into believing that the restore is not needed. */
115 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
116 return 0;
117 return 1;
118
119 case ASM_OPERANDS:
120 if (MEM_VOLATILE_P (x))
121 return 1;
122
123 /* Fall through. */
124
125 default:
126 break;
127 }
128
129 fmt = GET_RTX_FORMAT (code);
130 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
131 if (fmt[i] == 'e')
132 {
133 if (rtx_unstable_p (XEXP (x, i)))
134 return 1;
135 }
136 else if (fmt[i] == 'E')
137 {
138 int j;
139 for (j = 0; j < XVECLEN (x, i); j++)
140 if (rtx_unstable_p (XVECEXP (x, i, j)))
141 return 1;
142 }
143
144 return 0;
145 }
146
147 /* Return 1 if X has a value that can vary even between two
148 executions of the program. 0 means X can be compared reliably
149 against certain constants or near-constants.
150 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
151 zero, we are slightly more conservative.
152 The frame pointer and the arg pointer are considered constant. */
153
154 bool
155 rtx_varies_p (const_rtx x, bool for_alias)
156 {
157 RTX_CODE code;
158 int i;
159 const char *fmt;
160
161 if (!x)
162 return 0;
163
164 code = GET_CODE (x);
165 switch (code)
166 {
167 case MEM:
168 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
169
170 case CONST:
171 CASE_CONST_ANY:
172 case SYMBOL_REF:
173 case LABEL_REF:
174 return 0;
175
176 case REG:
177 /* Note that we have to test for the actual rtx used for the frame
178 and arg pointers and not just the register number in case we have
179 eliminated the frame and/or arg pointer and are using it
180 for pseudos. */
181 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
182 /* The arg pointer varies if it is not a fixed register. */
183 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
184 return 0;
185 if (x == pic_offset_table_rtx
186 /* ??? When call-clobbered, the value is stable modulo the restore
187 that must happen after a call. This currently screws up
188 local-alloc into believing that the restore is not needed, so we
189 must return 0 only if we are called from alias analysis. */
190 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
191 return 0;
192 return 1;
193
194 case LO_SUM:
195 /* The operand 0 of a LO_SUM is considered constant
196 (in fact it is related specifically to operand 1)
197 during alias analysis. */
198 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
199 || rtx_varies_p (XEXP (x, 1), for_alias);
200
201 case ASM_OPERANDS:
202 if (MEM_VOLATILE_P (x))
203 return 1;
204
205 /* Fall through. */
206
207 default:
208 break;
209 }
210
211 fmt = GET_RTX_FORMAT (code);
212 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
213 if (fmt[i] == 'e')
214 {
215 if (rtx_varies_p (XEXP (x, i), for_alias))
216 return 1;
217 }
218 else if (fmt[i] == 'E')
219 {
220 int j;
221 for (j = 0; j < XVECLEN (x, i); j++)
222 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
223 return 1;
224 }
225
226 return 0;
227 }
228
229 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
230 can cause a trap.  MODE is the mode of the MEM (not that of X) and
231 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
232 accesses on strict alignment machines. */
233
234 static int
235 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
236 enum machine_mode mode, bool unaligned_mems)
237 {
238 enum rtx_code code = GET_CODE (x);
239
240 if (STRICT_ALIGNMENT
241 && unaligned_mems
242 && GET_MODE_SIZE (mode) != 0)
243 {
244 HOST_WIDE_INT actual_offset = offset;
245 #ifdef SPARC_STACK_BOUNDARY_HACK
246 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
247 the real alignment of %sp. However, when it does this, the
248 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
249 if (SPARC_STACK_BOUNDARY_HACK
250 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
251 actual_offset -= STACK_POINTER_OFFSET;
252 #endif
253
254 if (actual_offset % GET_MODE_SIZE (mode) != 0)
255 return 1;
256 }
257
258 switch (code)
259 {
260 case SYMBOL_REF:
261 if (SYMBOL_REF_WEAK (x))
262 return 1;
263 if (!CONSTANT_POOL_ADDRESS_P (x))
264 {
265 tree decl;
266 HOST_WIDE_INT decl_size;
267
268 if (offset < 0)
269 return 1;
270 if (size == 0)
271 size = GET_MODE_SIZE (mode);
272 if (size == 0)
273 return offset != 0;
274
275 /* If the size of the access or of the symbol is unknown,
276 assume the worst. */
277 decl = SYMBOL_REF_DECL (x);
278
279 /* Else check that the access is in bounds. TODO: restructure
280 expr_size/tree_expr_size/int_expr_size and just use the latter. */
281 if (!decl)
282 decl_size = -1;
283 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
284 decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
285 ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
286 : -1);
287 else if (TREE_CODE (decl) == STRING_CST)
288 decl_size = TREE_STRING_LENGTH (decl);
289 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
290 decl_size = int_size_in_bytes (TREE_TYPE (decl));
291 else
292 decl_size = -1;
293
294 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
295 }
296
297 return 0;
298
299 case LABEL_REF:
300 return 0;
301
302 case REG:
303 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
304 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
305 || x == stack_pointer_rtx
306 /* The arg pointer varies if it is not a fixed register. */
307 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
308 return 0;
309 /* All of the virtual frame registers are stack references. */
310 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
311 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
312 return 0;
313 return 1;
314
315 case CONST:
316 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
317 mode, unaligned_mems);
318
319 case PLUS:
320 /* An address is assumed not to trap if:
321 - it is the pic register plus a constant. */
322 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
323 return 0;
324
325 /* - or it is an address that can't trap plus a constant integer,
326 with the proper remainder modulo the mode size if we are
327 considering unaligned memory references. */
328 if (CONST_INT_P (XEXP (x, 1))
329 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
330 size, mode, unaligned_mems))
331 return 0;
332
333 return 1;
334
335 case LO_SUM:
336 case PRE_MODIFY:
337 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
338 mode, unaligned_mems);
339
340 case PRE_DEC:
341 case PRE_INC:
342 case POST_DEC:
343 case POST_INC:
344 case POST_MODIFY:
345 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
346 mode, unaligned_mems);
347
348 default:
349 break;
350 }
351
352 /* If it isn't one of the cases above, it can cause a trap. */
353 return 1;
354 }
355
356 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
357
358 int
359 rtx_addr_can_trap_p (const_rtx x)
360 {
361 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
362 }
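
/* Illustrative examples (not from the original source):
   rtx_addr_can_trap_p returns 1 for a weak SYMBOL_REF, since a weak
   symbol may resolve to address zero, but returns 0 for something like
   (plus (reg fp) (const_int 4)), since frame-pointer-relative
   addresses are assumed valid; anything not matched by the cases
   above is conservatively assumed to trap.  */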
363
364 /* Return true if X is an address that is known to not be zero. */
365
366 bool
367 nonzero_address_p (const_rtx x)
368 {
369 const enum rtx_code code = GET_CODE (x);
370
371 switch (code)
372 {
373 case SYMBOL_REF:
374 return !SYMBOL_REF_WEAK (x);
375
376 case LABEL_REF:
377 return true;
378
379 case REG:
380 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
381 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
382 || x == stack_pointer_rtx
383 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
384 return true;
385 /* All of the virtual frame registers are stack references. */
386 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
387 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
388 return true;
389 return false;
390
391 case CONST:
392 return nonzero_address_p (XEXP (x, 0));
393
394 case PLUS:
395 /* Handle PIC references. */
396 if (XEXP (x, 0) == pic_offset_table_rtx
397 && CONSTANT_P (XEXP (x, 1)))
398 return true;
399 return false;
400
401 case PRE_MODIFY:
402 /* Similar to the above; allow positive offsets. Further, since
403 auto-inc is only allowed in memories, the register must be a
404 pointer. */
405 if (CONST_INT_P (XEXP (x, 1))
406 && INTVAL (XEXP (x, 1)) > 0)
407 return true;
408 return nonzero_address_p (XEXP (x, 0));
409
410 case PRE_INC:
411 /* Similarly. Further, the offset is always positive. */
412 return true;
413
414 case PRE_DEC:
415 case POST_DEC:
416 case POST_INC:
417 case POST_MODIFY:
418 return nonzero_address_p (XEXP (x, 0));
419
420 case LO_SUM:
421 return nonzero_address_p (XEXP (x, 1));
422
423 default:
424 break;
425 }
426
427 /* If it isn't one of the cases above, it might be zero. */
428 return false;
429 }
430
431 /* Return 1 if X refers to a memory location whose address
432 cannot be compared reliably with constant addresses,
433 or if X refers to a BLKmode memory object.
434 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
435 zero, we are slightly more conservative. */
436
437 bool
438 rtx_addr_varies_p (const_rtx x, bool for_alias)
439 {
440 enum rtx_code code;
441 int i;
442 const char *fmt;
443
444 if (x == 0)
445 return 0;
446
447 code = GET_CODE (x);
448 if (code == MEM)
449 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
450
451 fmt = GET_RTX_FORMAT (code);
452 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
453 if (fmt[i] == 'e')
454 {
455 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
456 return 1;
457 }
458 else if (fmt[i] == 'E')
459 {
460 int j;
461 for (j = 0; j < XVECLEN (x, i); j++)
462 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
463 return 1;
464 }
465 return 0;
466 }
467 \f
468 /* Return the CALL in X if there is one. */
469
470 rtx
471 get_call_rtx_from (rtx x)
472 {
473 if (INSN_P (x))
474 x = PATTERN (x);
475 if (GET_CODE (x) == PARALLEL)
476 x = XVECEXP (x, 0, 0);
477 if (GET_CODE (x) == SET)
478 x = SET_SRC (x);
479 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
480 return x;
481 return NULL_RTX;
482 }
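
/* Illustrative sketch (not from the original source): the function
   above unwraps the usual shapes a call pattern can take, e.g.

     (call (mem:QI (symbol_ref "foo")) (const_int 0))
     (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0)))
     (parallel [(set (reg:SI 0) (call (mem:QI ...) ...)) ...])

   and returns the inner CALL rtx, or NULL_RTX if X is not a call.  */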
483 \f
484 /* Return the value of the integer term in X, if one is apparent;
485 otherwise return 0.
486 Only obvious integer terms are detected.
487 This is used in cse.c with the `related_value' field. */
488
489 HOST_WIDE_INT
490 get_integer_term (const_rtx x)
491 {
492 if (GET_CODE (x) == CONST)
493 x = XEXP (x, 0);
494
495 if (GET_CODE (x) == MINUS
496 && CONST_INT_P (XEXP (x, 1)))
497 return - INTVAL (XEXP (x, 1));
498 if (GET_CODE (x) == PLUS
499 && CONST_INT_P (XEXP (x, 1)))
500 return INTVAL (XEXP (x, 1));
501 return 0;
502 }
503
504 /* If X is a constant, return the value sans apparent integer term;
505 otherwise return 0.
506 Only obvious integer terms are detected. */
507
508 rtx
509 get_related_value (const_rtx x)
510 {
511 if (GET_CODE (x) != CONST)
512 return 0;
513 x = XEXP (x, 0);
514 if (GET_CODE (x) == PLUS
515 && CONST_INT_P (XEXP (x, 1)))
516 return XEXP (x, 0);
517 else if (GET_CODE (x) == MINUS
518 && CONST_INT_P (XEXP (x, 1)))
519 return XEXP (x, 0);
520 return 0;
521 }
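
/* Illustrative examples (not from the original source): for
   X = (const (plus (symbol_ref "a") (const_int 8))), get_integer_term
   returns 8 and get_related_value returns (symbol_ref "a"); for the
   MINUS form the term is negated; for a bare (symbol_ref "a") both
   functions return 0.  */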
522 \f
523 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
524 to somewhere in the same object or object_block as SYMBOL. */
525
526 bool
527 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
528 {
529 tree decl;
530
531 if (GET_CODE (symbol) != SYMBOL_REF)
532 return false;
533
534 if (offset == 0)
535 return true;
536
537 if (offset > 0)
538 {
539 if (CONSTANT_POOL_ADDRESS_P (symbol)
540 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
541 return true;
542
543 decl = SYMBOL_REF_DECL (symbol);
544 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
545 return true;
546 }
547
548 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
549 && SYMBOL_REF_BLOCK (symbol)
550 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
551 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
552 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
553 return true;
554
555 return false;
556 }
557
558 /* Split X into a base and a constant offset, storing them in *BASE_OUT
559 and *OFFSET_OUT respectively. */
560
561 void
562 split_const (rtx x, rtx *base_out, rtx *offset_out)
563 {
564 if (GET_CODE (x) == CONST)
565 {
566 x = XEXP (x, 0);
567 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
568 {
569 *base_out = XEXP (x, 0);
570 *offset_out = XEXP (x, 1);
571 return;
572 }
573 }
574 *base_out = x;
575 *offset_out = const0_rtx;
576 }
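
/* Usage sketch (not from the original source; ADDR is a hypothetical
   address rtx):

     rtx base, offset;
     split_const (addr, &base, &offset);

   For addr = (const (plus (symbol_ref "a") (const_int 12))) this sets
   BASE to (symbol_ref "a") and OFFSET to (const_int 12); for any other
   form BASE is ADDR itself and OFFSET is const0_rtx.  */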
577 \f
578 /* Return the number of places FIND appears within X. If COUNT_DEST is
579 zero, we do not count occurrences inside the destination of a SET. */
580
581 int
582 count_occurrences (const_rtx x, const_rtx find, int count_dest)
583 {
584 int i, j;
585 enum rtx_code code;
586 const char *format_ptr;
587 int count;
588
589 if (x == find)
590 return 1;
591
592 code = GET_CODE (x);
593
594 switch (code)
595 {
596 case REG:
597 CASE_CONST_ANY:
598 case SYMBOL_REF:
599 case CODE_LABEL:
600 case PC:
601 case CC0:
602 return 0;
603
604 case EXPR_LIST:
605 count = count_occurrences (XEXP (x, 0), find, count_dest);
606 if (XEXP (x, 1))
607 count += count_occurrences (XEXP (x, 1), find, count_dest);
608 return count;
609
610 case MEM:
611 if (MEM_P (find) && rtx_equal_p (x, find))
612 return 1;
613 break;
614
615 case SET:
616 if (SET_DEST (x) == find && ! count_dest)
617 return count_occurrences (SET_SRC (x), find, count_dest);
618 break;
619
620 default:
621 break;
622 }
623
624 format_ptr = GET_RTX_FORMAT (code);
625 count = 0;
626
627 for (i = 0; i < GET_RTX_LENGTH (code); i++)
628 {
629 switch (*format_ptr++)
630 {
631 case 'e':
632 count += count_occurrences (XEXP (x, i), find, count_dest);
633 break;
634
635 case 'E':
636 for (j = 0; j < XVECLEN (x, i); j++)
637 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
638 break;
639 }
640 }
641 return count;
642 }
643
644 \f
645 /* Return TRUE if OP is a register or subreg of a register that
646 holds an unsigned quantity. Otherwise, return FALSE. */
647
648 bool
649 unsigned_reg_p (rtx op)
650 {
651 if (REG_P (op)
652 && REG_EXPR (op)
653 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
654 return true;
655
656 if (GET_CODE (op) == SUBREG
657 && SUBREG_PROMOTED_UNSIGNED_P (op))
658 return true;
659
660 return false;
661 }
662
663 \f
664 /* Nonzero if register REG appears somewhere within IN.
665 Also works if REG is not a register; in this case it checks
666 for a subexpression of IN that is Lisp "equal" to REG. */
667
668 int
669 reg_mentioned_p (const_rtx reg, const_rtx in)
670 {
671 const char *fmt;
672 int i;
673 enum rtx_code code;
674
675 if (in == 0)
676 return 0;
677
678 if (reg == in)
679 return 1;
680
681 if (GET_CODE (in) == LABEL_REF)
682 return reg == XEXP (in, 0);
683
684 code = GET_CODE (in);
685
686 switch (code)
687 {
688 /* Compare registers by number. */
689 case REG:
690 return REG_P (reg) && REGNO (in) == REGNO (reg);
691
692 /* These codes have no constituent expressions
693 and are unique. */
694 case SCRATCH:
695 case CC0:
696 case PC:
697 return 0;
698
699 CASE_CONST_ANY:
700 /* These are kept unique for a given value. */
701 return 0;
702
703 default:
704 break;
705 }
706
707 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
708 return 1;
709
710 fmt = GET_RTX_FORMAT (code);
711
712 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
713 {
714 if (fmt[i] == 'E')
715 {
716 int j;
717 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
718 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
719 return 1;
720 }
721 else if (fmt[i] == 'e'
722 && reg_mentioned_p (reg, XEXP (in, i)))
723 return 1;
724 }
725 return 0;
726 }
727 \f
728 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
729 no CODE_LABEL insn. */
730
731 int
732 no_labels_between_p (const_rtx beg, const_rtx end)
733 {
734 rtx p;
735 if (beg == end)
736 return 0;
737 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
738 if (LABEL_P (p))
739 return 0;
740 return 1;
741 }
742
743 /* Nonzero if register REG is used in an insn between
744 FROM_INSN and TO_INSN (exclusive of those two). */
745
746 int
747 reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
748 {
749 rtx insn;
750
751 if (from_insn == to_insn)
752 return 0;
753
754 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
755 if (NONDEBUG_INSN_P (insn)
756 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
757 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
758 return 1;
759 return 0;
760 }
761 \f
762 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
763 is entirely replaced by a new value and the only use is as a SET_DEST,
764 we do not consider it a reference. */
765
766 int
767 reg_referenced_p (const_rtx x, const_rtx body)
768 {
769 int i;
770
771 switch (GET_CODE (body))
772 {
773 case SET:
774 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
775 return 1;
776
777 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
778 of a REG that occupies all of the REG, the insn references X if
779 it is mentioned in the destination. */
780 if (GET_CODE (SET_DEST (body)) != CC0
781 && GET_CODE (SET_DEST (body)) != PC
782 && !REG_P (SET_DEST (body))
783 && ! (GET_CODE (SET_DEST (body)) == SUBREG
784 && REG_P (SUBREG_REG (SET_DEST (body)))
785 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
786 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
787 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
788 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
789 && reg_overlap_mentioned_p (x, SET_DEST (body)))
790 return 1;
791 return 0;
792
793 case ASM_OPERANDS:
794 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
795 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
796 return 1;
797 return 0;
798
799 case CALL:
800 case USE:
801 case IF_THEN_ELSE:
802 return reg_overlap_mentioned_p (x, body);
803
804 case TRAP_IF:
805 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
806
807 case PREFETCH:
808 return reg_overlap_mentioned_p (x, XEXP (body, 0));
809
810 case UNSPEC:
811 case UNSPEC_VOLATILE:
812 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
813 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
814 return 1;
815 return 0;
816
817 case PARALLEL:
818 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
819 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
820 return 1;
821 return 0;
822
823 case CLOBBER:
824 if (MEM_P (XEXP (body, 0)))
825 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
826 return 1;
827 return 0;
828
829 case COND_EXEC:
830 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
831 return 1;
832 return reg_referenced_p (x, COND_EXEC_CODE (body));
833
834 default:
835 return 0;
836 }
837 }
838 \f
839 /* Nonzero if register REG is set or clobbered in an insn between
840 FROM_INSN and TO_INSN (exclusive of those two). */
841
842 int
843 reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
844 {
845 const_rtx insn;
846
847 if (from_insn == to_insn)
848 return 0;
849
850 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
851 if (INSN_P (insn) && reg_set_p (reg, insn))
852 return 1;
853 return 0;
854 }
855
856 /* Internals of reg_set_between_p. */
857 int
858 reg_set_p (const_rtx reg, const_rtx insn)
859 {
860 /* We can be passed an insn or part of one. If we are passed an insn,
861 check if a side-effect of the insn clobbers REG. */
862 if (INSN_P (insn)
863 && (FIND_REG_INC_NOTE (insn, reg)
864 || (CALL_P (insn)
865 && ((REG_P (reg)
866 && REGNO (reg) < FIRST_PSEUDO_REGISTER
867 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
868 GET_MODE (reg), REGNO (reg)))
869 || MEM_P (reg)
870 || find_reg_fusage (insn, CLOBBER, reg)))))
871 return 1;
872
873 return set_of (reg, insn) != NULL_RTX;
874 }
875
876 /* Similar to reg_set_between_p, but check all registers in X. Return 0
877 only if none of them are modified between START and END. Return 1 if
878 X contains a MEM; this routine does use memory aliasing. */
879
880 int
881 modified_between_p (const_rtx x, const_rtx start, const_rtx end)
882 {
883 const enum rtx_code code = GET_CODE (x);
884 const char *fmt;
885 int i, j;
886 rtx insn;
887
888 if (start == end)
889 return 0;
890
891 switch (code)
892 {
893 CASE_CONST_ANY:
894 case CONST:
895 case SYMBOL_REF:
896 case LABEL_REF:
897 return 0;
898
899 case PC:
900 case CC0:
901 return 1;
902
903 case MEM:
904 if (modified_between_p (XEXP (x, 0), start, end))
905 return 1;
906 if (MEM_READONLY_P (x))
907 return 0;
908 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
909 if (memory_modified_in_insn_p (x, insn))
910 return 1;
911 return 0;
912 break;
913
914 case REG:
915 return reg_set_between_p (x, start, end);
916
917 default:
918 break;
919 }
920
921 fmt = GET_RTX_FORMAT (code);
922 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
923 {
924 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
925 return 1;
926
927 else if (fmt[i] == 'E')
928 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
929 if (modified_between_p (XVECEXP (x, i, j), start, end))
930 return 1;
931 }
932
933 return 0;
934 }
935
936 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
937 of them are modified in INSN. Return 1 if X contains a MEM; this routine
938 does use memory aliasing. */
939
940 int
941 modified_in_p (const_rtx x, const_rtx insn)
942 {
943 const enum rtx_code code = GET_CODE (x);
944 const char *fmt;
945 int i, j;
946
947 switch (code)
948 {
949 CASE_CONST_ANY:
950 case CONST:
951 case SYMBOL_REF:
952 case LABEL_REF:
953 return 0;
954
955 case PC:
956 case CC0:
957 return 1;
958
959 case MEM:
960 if (modified_in_p (XEXP (x, 0), insn))
961 return 1;
962 if (MEM_READONLY_P (x))
963 return 0;
964 if (memory_modified_in_insn_p (x, insn))
965 return 1;
966 return 0;
967 break;
968
969 case REG:
970 return reg_set_p (x, insn);
971
972 default:
973 break;
974 }
975
976 fmt = GET_RTX_FORMAT (code);
977 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
978 {
979 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
980 return 1;
981
982 else if (fmt[i] == 'E')
983 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
984 if (modified_in_p (XVECEXP (x, i, j), insn))
985 return 1;
986 }
987
988 return 0;
989 }
990 \f
991 /* Helper function for set_of. */
992 struct set_of_data
993 {
994 const_rtx found;
995 const_rtx pat;
996 };
997
998 static void
999 set_of_1 (rtx x, const_rtx pat, void *data1)
1000 {
1001 struct set_of_data *const data = (struct set_of_data *) (data1);
1002 if (rtx_equal_p (x, data->pat)
1003 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1004 data->found = pat;
1005 }
1006
1007 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1008 (either directly or via STRICT_LOW_PART and similar modifiers). */
1009 const_rtx
1010 set_of (const_rtx pat, const_rtx insn)
1011 {
1012 struct set_of_data data;
1013 data.found = NULL_RTX;
1014 data.pat = pat;
1015 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1016 return data.found;
1017 }
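
/* Usage sketch (not from the original source): to ask whether INSN
   writes all or part of register REG, including through
   STRICT_LOW_PART or a covering SUBREG:

     if (set_of (reg, insn) != NULL_RTX)
       ...

   reg_set_p above is essentially this test plus the auto-inc and
   call-clobber side effects.  */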
1018
1019 /* This function, called through note_stores, collects sets and
1020 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1021 by DATA. */
1022 void
1023 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1024 {
1025 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1026 if (REG_P (x) && HARD_REGISTER_P (x))
1027 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1028 }
1029
1030 /* Examine INSN, and compute the set of hard registers written by it.
1031 Store it in *PSET. Should only be called after reload. */
1032 void
1033 find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
1034 {
1035 rtx link;
1036
1037 CLEAR_HARD_REG_SET (*pset);
1038 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1039 if (CALL_P (insn))
1040 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1041 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1042 if (REG_NOTE_KIND (link) == REG_INC)
1043 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1044 }
1045
1046 /* A for_each_rtx subroutine of record_hard_reg_uses. */
1047 static int
1048 record_hard_reg_uses_1 (rtx *px, void *data)
1049 {
1050 rtx x = *px;
1051 HARD_REG_SET *pused = (HARD_REG_SET *)data;
1052
1053 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1054 {
1055 int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
1056 while (nregs-- > 0)
1057 SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
1058 }
1059 return 0;
1060 }
1061
1062 /* Like record_hard_reg_sets, but called through note_uses. */
1063 void
1064 record_hard_reg_uses (rtx *px, void *data)
1065 {
1066 for_each_rtx (px, record_hard_reg_uses_1, data);
1067 }
1068 \f
1069 /* Given an INSN, return a SET expression if this insn has only a single SET.
1070 It may also have CLOBBERs, USEs, or SETs whose outputs
1071 will not be used, which we ignore. */
1072
1073 rtx
1074 single_set_2 (const_rtx insn, const_rtx pat)
1075 {
1076 rtx set = NULL;
1077 int set_verified = 1;
1078 int i;
1079
1080 if (GET_CODE (pat) == PARALLEL)
1081 {
1082 for (i = 0; i < XVECLEN (pat, 0); i++)
1083 {
1084 rtx sub = XVECEXP (pat, 0, i);
1085 switch (GET_CODE (sub))
1086 {
1087 case USE:
1088 case CLOBBER:
1089 break;
1090
1091 case SET:
1092 /* We can consider insns having multiple sets, where all
1093 but one are dead, as single set insns.  In the common case
1094 only one set is present in the pattern, so we want to
1095 avoid checking for REG_UNUSED notes unless necessary.
1096
1097 When we reach a set for the first time, we just expect it
1098 to be the single set we are looking for; only when more
1099 sets are found in the insn do we check them. */
1100 if (!set_verified)
1101 {
1102 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1103 && !side_effects_p (set))
1104 set = NULL;
1105 else
1106 set_verified = 1;
1107 }
1108 if (!set)
1109 set = sub, set_verified = 0;
1110 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1111 || side_effects_p (sub))
1112 return NULL_RTX;
1113 break;
1114
1115 default:
1116 return NULL_RTX;
1117 }
1118 }
1119 }
1120 return set;
1121 }
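
/* Usage sketch (not from the original source): most callers use the
   single_set wrapper from rtl.h, which handles the plain SET case
   inline and falls back to the function above for PARALLELs:

     rtx set = single_set (insn);
     if (set)
       ... examine SET_SRC (set) and SET_DEST (set) ...  */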
1122
1123 /* Given an INSN, return nonzero if it has more than one SET, else return
1124 zero. */
1125
1126 int
1127 multiple_sets (const_rtx insn)
1128 {
1129 int found;
1130 int i;
1131
1132 /* INSN must be an insn. */
1133 if (! INSN_P (insn))
1134 return 0;
1135
1136 /* Only a PARALLEL can have multiple SETs. */
1137 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1138 {
1139 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1140 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1141 {
1142 /* If we have already found a SET, then return now. */
1143 if (found)
1144 return 1;
1145 else
1146 found = 1;
1147 }
1148 }
1149
1150 /* Either zero or one SET. */
1151 return 0;
1152 }
1153 \f
1154 /* Return nonzero if the destination of SET equals the source
1155 and there are no side effects. */
1156
1157 int
1158 set_noop_p (const_rtx set)
1159 {
1160 rtx src = SET_SRC (set);
1161 rtx dst = SET_DEST (set);
1162
1163 if (dst == pc_rtx && src == pc_rtx)
1164 return 1;
1165
1166 if (MEM_P (dst) && MEM_P (src))
1167 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1168
1169 if (GET_CODE (dst) == ZERO_EXTRACT)
1170 return rtx_equal_p (XEXP (dst, 0), src)
1171 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1172 && !side_effects_p (src);
1173
1174 if (GET_CODE (dst) == STRICT_LOW_PART)
1175 dst = XEXP (dst, 0);
1176
1177 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1178 {
1179 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1180 return 0;
1181 src = SUBREG_REG (src);
1182 dst = SUBREG_REG (dst);
1183 }
1184
1185 return (REG_P (src) && REG_P (dst)
1186 && REGNO (src) == REGNO (dst));
1187 }
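
/* Illustrative examples (not from the original source):
   (set (reg:SI 42) (reg:SI 42)) is a no-op, as is a copy between two
   identical MEMs whose address has no side effects; noop_move_p below
   extends this test to whole insns, including PARALLELs.  */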
1188 \f
1189 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1190 value to itself. */
1191
1192 int
1193 noop_move_p (const_rtx insn)
1194 {
1195 rtx pat = PATTERN (insn);
1196
1197 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1198 return 1;
1199
1200 /* Insns carrying these notes are useful later on. */
1201 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1202 return 0;
1203
1204 if (GET_CODE (pat) == SET && set_noop_p (pat))
1205 return 1;
1206
1207 if (GET_CODE (pat) == PARALLEL)
1208 {
1209 int i;
1210 /* If nothing but SETs of registers to themselves,
1211 this insn can also be deleted. */
1212 for (i = 0; i < XVECLEN (pat, 0); i++)
1213 {
1214 rtx tem = XVECEXP (pat, 0, i);
1215
1216 if (GET_CODE (tem) == USE
1217 || GET_CODE (tem) == CLOBBER)
1218 continue;
1219
1220 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1221 return 0;
1222 }
1223
1224 return 1;
1225 }
1226 return 0;
1227 }
1228 \f
1229
1230 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1231 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1232 If the object was modified, or if we hit a partial assignment to X or a
1233 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1234 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1235 be the src. */
1236
1237 rtx
1238 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1239 {
1240 rtx p;
1241
1242 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1243 p = PREV_INSN (p))
1244 if (INSN_P (p))
1245 {
1246 rtx set = single_set (p);
1247 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1248
1249 if (set && rtx_equal_p (x, SET_DEST (set)))
1250 {
1251 rtx src = SET_SRC (set);
1252
1253 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1254 src = XEXP (note, 0);
1255
1256 if ((valid_to == NULL_RTX
1257 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1258 /* Reject hard registers because we don't usually want
1259 to use them; we'd rather use a pseudo. */
1260 && (! (REG_P (src)
1261 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1262 {
1263 *pinsn = p;
1264 return src;
1265 }
1266 }
1267
1268 /* If set in non-simple way, we don't have a value. */
1269 if (reg_set_p (x, p))
1270 break;
1271 }
1272
1273 return x;
1274 }
1275 \f
1276 /* Return nonzero if register in range [REGNO, ENDREGNO)
1277 appears either explicitly or implicitly in X
1278 other than being stored into.
1279
1280 References contained within the substructure at LOC do not count.
1281 LOC may be zero, meaning don't ignore anything. */
1282
1283 int
1284 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1285 rtx *loc)
1286 {
1287 int i;
1288 unsigned int x_regno;
1289 RTX_CODE code;
1290 const char *fmt;
1291
1292 repeat:
1293 /* The contents of a REG_NONNEG note are always zero, so we must come here
1294 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1295 if (x == 0)
1296 return 0;
1297
1298 code = GET_CODE (x);
1299
1300 switch (code)
1301 {
1302 case REG:
1303 x_regno = REGNO (x);
1304
1305 /* If we are modifying the stack, frame, or argument pointer, it will
1306 clobber a virtual register. In fact, we could be more precise,
1307 but it isn't worth it. */
1308 if ((x_regno == STACK_POINTER_REGNUM
1309 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1310 || x_regno == ARG_POINTER_REGNUM
1311 #endif
1312 || x_regno == FRAME_POINTER_REGNUM)
1313 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1314 return 1;
1315
1316 return endregno > x_regno && regno < END_REGNO (x);
1317
1318 case SUBREG:
1319 /* If this is a SUBREG of a hard reg, we can see exactly which
1320 registers are being modified. Otherwise, handle normally. */
1321 if (REG_P (SUBREG_REG (x))
1322 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1323 {
1324 unsigned int inner_regno = subreg_regno (x);
1325 unsigned int inner_endregno
1326 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1327 ? subreg_nregs (x) : 1);
1328
1329 return endregno > inner_regno && regno < inner_endregno;
1330 }
1331 break;
1332
1333 case CLOBBER:
1334 case SET:
1335 if (&SET_DEST (x) != loc
1336 /* Note setting a SUBREG counts as referring to the REG it is in for
1337 a pseudo but not for hard registers since we can
1338 treat each word individually. */
1339 && ((GET_CODE (SET_DEST (x)) == SUBREG
1340 && loc != &SUBREG_REG (SET_DEST (x))
1341 && REG_P (SUBREG_REG (SET_DEST (x)))
1342 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1343 && refers_to_regno_p (regno, endregno,
1344 SUBREG_REG (SET_DEST (x)), loc))
1345 || (!REG_P (SET_DEST (x))
1346 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1347 return 1;
1348
1349 if (code == CLOBBER || loc == &SET_SRC (x))
1350 return 0;
1351 x = SET_SRC (x);
1352 goto repeat;
1353
1354 default:
1355 break;
1356 }
1357
1358 /* X does not match, so try its subexpressions. */
1359
1360 fmt = GET_RTX_FORMAT (code);
1361 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1362 {
1363 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1364 {
1365 if (i == 0)
1366 {
1367 x = XEXP (x, 0);
1368 goto repeat;
1369 }
1370 else
1371 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1372 return 1;
1373 }
1374 else if (fmt[i] == 'E')
1375 {
1376 int j;
1377 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1378 if (loc != &XVECEXP (x, i, j)
1379 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1380 return 1;
1381 }
1382 }
1383 return 0;
1384 }
1385
1386 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1387 we check if any register number in X conflicts with the relevant register
1388 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1389 contains a MEM (we don't bother checking for memory addresses that can't
1390 conflict because we expect this to be a rare case). */
1391
1392 int
1393 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1394 {
1395 unsigned int regno, endregno;
1396
1397 /* If either argument is a constant, then modifying X can not
1398 affect IN.  We handle the constant IN case here; the CONSTANT_P (x)
1399 check is folded into the default case of the switch statement below. */
1400 if (CONSTANT_P (in))
1401 return 0;
1402
1403 recurse:
1404 switch (GET_CODE (x))
1405 {
1406 case STRICT_LOW_PART:
1407 case ZERO_EXTRACT:
1408 case SIGN_EXTRACT:
1409 /* Overly conservative. */
1410 x = XEXP (x, 0);
1411 goto recurse;
1412
1413 case SUBREG:
1414 regno = REGNO (SUBREG_REG (x));
1415 if (regno < FIRST_PSEUDO_REGISTER)
1416 regno = subreg_regno (x);
1417 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1418 ? subreg_nregs (x) : 1);
1419 goto do_reg;
1420
1421 case REG:
1422 regno = REGNO (x);
1423 endregno = END_REGNO (x);
1424 do_reg:
1425 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1426
1427 case MEM:
1428 {
1429 const char *fmt;
1430 int i;
1431
1432 if (MEM_P (in))
1433 return 1;
1434
1435 fmt = GET_RTX_FORMAT (GET_CODE (in));
1436 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1437 if (fmt[i] == 'e')
1438 {
1439 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1440 return 1;
1441 }
1442 else if (fmt[i] == 'E')
1443 {
1444 int j;
1445 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1446 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1447 return 1;
1448 }
1449
1450 return 0;
1451 }
1452
1453 case SCRATCH:
1454 case PC:
1455 case CC0:
1456 return reg_mentioned_p (x, in);
1457
1458 case PARALLEL:
1459 {
1460 int i;
1461
1462 /* If any register in here refers to it we return true. */
1463 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1464 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1465 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1466 return 1;
1467 return 0;
1468 }
1469
1470 default:
1471 gcc_assert (CONSTANT_P (x));
1472 return 0;
1473 }
1474 }
1475 \f
1476 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1477 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1478 ignored by note_stores, but passed to FUN.
1479
1480 FUN receives three arguments:
1481 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1482 2. the SET or CLOBBER rtx that does the store,
1483 3. the pointer DATA provided to note_stores.
1484
1485 If the item being stored in or clobbered is a SUBREG of a hard register,
1486 the SUBREG will be passed. */
1487
1488 void
1489 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1490 {
1491 int i;
1492
1493 if (GET_CODE (x) == COND_EXEC)
1494 x = COND_EXEC_CODE (x);
1495
1496 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1497 {
1498 rtx dest = SET_DEST (x);
1499
1500 while ((GET_CODE (dest) == SUBREG
1501 && (!REG_P (SUBREG_REG (dest))
1502 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1503 || GET_CODE (dest) == ZERO_EXTRACT
1504 || GET_CODE (dest) == STRICT_LOW_PART)
1505 dest = XEXP (dest, 0);
1506
1507 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1508 each of whose first operand is a register. */
1509 if (GET_CODE (dest) == PARALLEL)
1510 {
1511 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1512 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1513 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1514 }
1515 else
1516 (*fun) (dest, x, data);
1517 }
1518
1519 else if (GET_CODE (x) == PARALLEL)
1520 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1521 note_stores (XVECEXP (x, 0, i), fun, data);
1522 }
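
/* Usage sketch (not from the original source): a typical note_stores
   callback only cares about REG destinations, e.g.

     static void
     count_reg_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         ++*(int *) data;
     }

     ...
     int n = 0;
     note_stores (PATTERN (insn), count_reg_stores, &n);

   record_hard_reg_sets above is a real instance of this pattern.  */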
1523 \f
1524 /* Like note_stores, but call FUN for each expression that is being
1525 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1526 FUN for each expression, not any interior subexpressions. FUN receives a
1527 pointer to the expression and the DATA passed to this function.
1528
1529 Note that this is not quite the same test as that done in reg_referenced_p
1530 since that considers something as being referenced if it is being
1531 partially set, while we do not. */
1532
1533 void
1534 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1535 {
1536 rtx body = *pbody;
1537 int i;
1538
1539 switch (GET_CODE (body))
1540 {
1541 case COND_EXEC:
1542 (*fun) (&COND_EXEC_TEST (body), data);
1543 note_uses (&COND_EXEC_CODE (body), fun, data);
1544 return;
1545
1546 case PARALLEL:
1547 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1548 note_uses (&XVECEXP (body, 0, i), fun, data);
1549 return;
1550
1551 case SEQUENCE:
1552 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1553 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1554 return;
1555
1556 case USE:
1557 (*fun) (&XEXP (body, 0), data);
1558 return;
1559
1560 case ASM_OPERANDS:
1561 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1562 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1563 return;
1564
1565 case TRAP_IF:
1566 (*fun) (&TRAP_CONDITION (body), data);
1567 return;
1568
1569 case PREFETCH:
1570 (*fun) (&XEXP (body, 0), data);
1571 return;
1572
1573 case UNSPEC:
1574 case UNSPEC_VOLATILE:
1575 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1576 (*fun) (&XVECEXP (body, 0, i), data);
1577 return;
1578
1579 case CLOBBER:
1580 if (MEM_P (XEXP (body, 0)))
1581 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1582 return;
1583
1584 case SET:
1585 {
1586 rtx dest = SET_DEST (body);
1587
1588 /* For sets we process everything in the source, plus registers in any
1589 memory expression in the store, and the size/position operands of a ZERO_EXTRACT. */
1590 (*fun) (&SET_SRC (body), data);
1591
1592 if (GET_CODE (dest) == ZERO_EXTRACT)
1593 {
1594 (*fun) (&XEXP (dest, 1), data);
1595 (*fun) (&XEXP (dest, 2), data);
1596 }
1597
1598 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1599 dest = XEXP (dest, 0);
1600
1601 if (MEM_P (dest))
1602 (*fun) (&XEXP (dest, 0), data);
1603 }
1604 return;
1605
1606 default:
1607 /* All the other possibilities never store. */
1608 (*fun) (pbody, data);
1609 return;
1610 }
1611 }
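
/* Usage sketch (not from the original source): note_uses hands the
   callback a pointer to each used expression, so the callback may
   rewrite the expression in place:

     static void
     uses_cb (rtx *loc, void *data)
     {
       if (REG_P (*loc))
         ... record or replace *loc ...
     }

     note_uses (&PATTERN (insn), uses_cb, NULL);  */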
1612 \f
1613 /* Return nonzero if X's old contents don't survive after INSN.
1614 This will be true if X is (cc0) or if X is a register and
1615 X dies in INSN or because INSN entirely sets X.
1616
1617 "Entirely set" means set directly and not through a SUBREG, or
1618 ZERO_EXTRACT, so no trace of the old contents remains.
1619 Likewise, REG_INC does not count.
1620
1621 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1622 but for this use that makes no difference, since regs don't overlap
1623 during their lifetimes. Therefore, this function may be used
1624 at any time after deaths have been computed.
1625
1626 If REG is a hard reg that occupies multiple machine registers, this
1627 function will only return 1 if each of those registers will be replaced
1628 by INSN. */
1629
1630 int
1631 dead_or_set_p (const_rtx insn, const_rtx x)
1632 {
1633 unsigned int regno, end_regno;
1634 unsigned int i;
1635
1636 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1637 if (GET_CODE (x) == CC0)
1638 return 1;
1639
1640 gcc_assert (REG_P (x));
1641
1642 regno = REGNO (x);
1643 end_regno = END_REGNO (x);
1644 for (i = regno; i < end_regno; i++)
1645 if (! dead_or_set_regno_p (insn, i))
1646 return 0;
1647
1648 return 1;
1649 }
1650
1651 /* Return TRUE iff DEST is a register or subreg of a register and
1652 doesn't change the number of words of the inner register, and any
1653 part of the register is TEST_REGNO. */
1654
1655 static bool
1656 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1657 {
1658 unsigned int regno, endregno;
1659
1660 if (GET_CODE (dest) == SUBREG
1661 && (((GET_MODE_SIZE (GET_MODE (dest))
1662 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1663 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1664 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1665 dest = SUBREG_REG (dest);
1666
1667 if (!REG_P (dest))
1668 return false;
1669
1670 regno = REGNO (dest);
1671 endregno = END_REGNO (dest);
1672 return (test_regno >= regno && test_regno < endregno);
1673 }
1674
1675 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1676 any member matches the covers_regno_no_parallel_p criteria. */
1677
1678 static bool
1679 covers_regno_p (const_rtx dest, unsigned int test_regno)
1680 {
1681 if (GET_CODE (dest) == PARALLEL)
1682 {
1683 /* Some targets place small structures in registers for return
1684 values of functions, and those registers are wrapped in
1685 PARALLELs that we may see as the destination of a SET. */
1686 int i;
1687
1688 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1689 {
1690 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1691 if (inner != NULL_RTX
1692 && covers_regno_no_parallel_p (inner, test_regno))
1693 return true;
1694 }
1695
1696 return false;
1697 }
1698 else
1699 return covers_regno_no_parallel_p (dest, test_regno);
1700 }
1701
1702 /* Utility function for dead_or_set_p to check an individual register. */
1703
1704 int
1705 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1706 {
1707 const_rtx pattern;
1708
1709 /* See if there is a death note for something that includes TEST_REGNO. */
1710 if (find_regno_note (insn, REG_DEAD, test_regno))
1711 return 1;
1712
1713 if (CALL_P (insn)
1714 && find_regno_fusage (insn, CLOBBER, test_regno))
1715 return 1;
1716
1717 pattern = PATTERN (insn);
1718
1719 /* If a COND_EXEC is not executed, the value survives. */
1720 if (GET_CODE (pattern) == COND_EXEC)
1721 return 0;
1722
1723 if (GET_CODE (pattern) == SET)
1724 return covers_regno_p (SET_DEST (pattern), test_regno);
1725 else if (GET_CODE (pattern) == PARALLEL)
1726 {
1727 int i;
1728
1729 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1730 {
1731 rtx body = XVECEXP (pattern, 0, i);
1732
1733 if (GET_CODE (body) == COND_EXEC)
1734 body = COND_EXEC_CODE (body);
1735
1736 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1737 && covers_regno_p (SET_DEST (body), test_regno))
1738 return 1;
1739 }
1740 }
1741
1742 return 0;
1743 }
1744
1745 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1746 If DATUM is nonzero, look for one whose datum is DATUM. */
1747
1748 rtx
1749 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1750 {
1751 rtx link;
1752
1753 gcc_checking_assert (insn);
1754
1755 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1756 if (! INSN_P (insn))
1757 return 0;
1758 if (datum == 0)
1759 {
1760 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1761 if (REG_NOTE_KIND (link) == kind)
1762 return link;
1763 return 0;
1764 }
1765
1766 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1767 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1768 return link;
1769 return 0;
1770 }
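
/* Usage sketch (not from the original source):

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... XEXP (note, 0) is the value SET_DEST is known to equal ...  */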
1771
1772 /* Return the reg-note of kind KIND in insn INSN which applies to register
1773 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1774 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1775 it might be the case that the note overlaps REGNO. */
1776
1777 rtx
1778 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1779 {
1780 rtx link;
1781
1782 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1783 if (! INSN_P (insn))
1784 return 0;
1785
1786 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1787 if (REG_NOTE_KIND (link) == kind
1788 /* Verify that it is a register, so that scratch and MEM won't cause a
1789 problem here. */
1790 && REG_P (XEXP (link, 0))
1791 && REGNO (XEXP (link, 0)) <= regno
1792 && END_REGNO (XEXP (link, 0)) > regno)
1793 return link;
1794 return 0;
1795 }
1796
1797 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1798 has such a note. */
1799
1800 rtx
1801 find_reg_equal_equiv_note (const_rtx insn)
1802 {
1803 rtx link;
1804
1805 if (!INSN_P (insn))
1806 return 0;
1807
1808 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1809 if (REG_NOTE_KIND (link) == REG_EQUAL
1810 || REG_NOTE_KIND (link) == REG_EQUIV)
1811 {
1812 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1813 insns that have multiple sets. Checking single_set to
1814 make sure of this is not the proper check, as explained
1815 in the comment in set_unique_reg_note.
1816
1817 This should be changed into an assert. */
1818 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1819 return 0;
1820 return link;
1821 }
1822 return NULL;
1823 }
1824
1825 /* Check whether INSN is a single_set whose source is known to be
1826 equivalent to a constant. Return that constant if so, otherwise
1827 return null. */
1828
1829 rtx
1830 find_constant_src (const_rtx insn)
1831 {
1832 rtx note, set, x;
1833
1834 set = single_set (insn);
1835 if (set)
1836 {
1837 x = avoid_constant_pool_reference (SET_SRC (set));
1838 if (CONSTANT_P (x))
1839 return x;
1840 }
1841
1842 note = find_reg_equal_equiv_note (insn);
1843 if (note && CONSTANT_P (XEXP (note, 0)))
1844 return XEXP (note, 0);
1845
1846 return NULL_RTX;
1847 }
1848
1849 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1850 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1851
1852 int
1853 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1854 {
1855 /* If it's not a CALL_INSN, it can't possibly have a
1856 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1857 if (!CALL_P (insn))
1858 return 0;
1859
1860 gcc_assert (datum);
1861
1862 if (!REG_P (datum))
1863 {
1864 rtx link;
1865
1866 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1867 link;
1868 link = XEXP (link, 1))
1869 if (GET_CODE (XEXP (link, 0)) == code
1870 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1871 return 1;
1872 }
1873 else
1874 {
1875 unsigned int regno = REGNO (datum);
1876
1877 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1878 to pseudo registers, so don't bother checking. */
1879
1880 if (regno < FIRST_PSEUDO_REGISTER)
1881 {
1882 unsigned int end_regno = END_HARD_REGNO (datum);
1883 unsigned int i;
1884
1885 for (i = regno; i < end_regno; i++)
1886 if (find_regno_fusage (insn, code, i))
1887 return 1;
1888 }
1889 }
1890
1891 return 0;
1892 }
1893
1894 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1895 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1896
1897 int
1898 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
1899 {
1900 rtx link;
1901
1902 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1903 to pseudo registers, so don't bother checking. */
1904
1905 if (regno >= FIRST_PSEUDO_REGISTER
1906 || !CALL_P (insn) )
1907 return 0;
1908
1909 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1910 {
1911 rtx op, reg;
1912
1913 if (GET_CODE (op = XEXP (link, 0)) == code
1914 && REG_P (reg = XEXP (op, 0))
1915 && REGNO (reg) <= regno
1916 && END_HARD_REGNO (reg) > regno)
1917 return 1;
1918 }
1919
1920 return 0;
1921 }
1922
1923 \f
1924 /* Allocate a register note with kind KIND and datum DATUM. LIST is
1925 stored as the pointer to the next register note. */
1926
1927 rtx
1928 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
1929 {
1930 rtx note;
1931
1932 switch (kind)
1933 {
1934 case REG_CC_SETTER:
1935 case REG_CC_USER:
1936 case REG_LABEL_TARGET:
1937 case REG_LABEL_OPERAND:
1938 case REG_TM:
1939 /* These types of register notes use an INSN_LIST rather than an
1940 EXPR_LIST, so that copying is done right and dumps look
1941 better. */
1942 note = alloc_INSN_LIST (datum, list);
1943 PUT_REG_NOTE_KIND (note, kind);
1944 break;
1945
1946 default:
1947 note = alloc_EXPR_LIST (kind, datum, list);
1948 break;
1949 }
1950
1951 return note;
1952 }
1953
1954 /* Add register note with kind KIND and datum DATUM to INSN. */
1955
1956 void
1957 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
1958 {
1959 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
1960 }
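
/* Usage sketch (not from the original source):

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   records that the destination of INSN's single set is known to equal
   42 at this point; remove_note below undoes it and keeps the df
   note information consistent.  */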
1961
1962 /* Remove register note NOTE from the REG_NOTES of INSN. */
1963
1964 void
1965 remove_note (rtx insn, const_rtx note)
1966 {
1967 rtx link;
1968
1969 if (note == NULL_RTX)
1970 return;
1971
1972 if (REG_NOTES (insn) == note)
1973 REG_NOTES (insn) = XEXP (note, 1);
1974 else
1975 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1976 if (XEXP (link, 1) == note)
1977 {
1978 XEXP (link, 1) = XEXP (note, 1);
1979 break;
1980 }
1981
1982 switch (REG_NOTE_KIND (note))
1983 {
1984 case REG_EQUAL:
1985 case REG_EQUIV:
1986 df_notes_rescan (insn);
1987 break;
1988 default:
1989 break;
1990 }
1991 }
1992
1993 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
1994
1995 void
1996 remove_reg_equal_equiv_notes (rtx insn)
1997 {
1998 rtx *loc;
1999
2000 loc = &REG_NOTES (insn);
2001 while (*loc)
2002 {
2003 enum reg_note kind = REG_NOTE_KIND (*loc);
2004 if (kind == REG_EQUAL || kind == REG_EQUIV)
2005 *loc = XEXP (*loc, 1);
2006 else
2007 loc = &XEXP (*loc, 1);
2008 }
2009 }
2010
2011 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2012
2013 void
2014 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2015 {
2016 df_ref eq_use;
2017
2018 if (!df)
2019 return;
2020
2021 /* This loop is a little tricky. We cannot just go down the chain because
2022 it is being modified by some actions in the loop. So we just iterate
2023 over the head. We plan to drain the list anyway. */
2024 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2025 {
2026 rtx insn = DF_REF_INSN (eq_use);
2027 rtx note = find_reg_equal_equiv_note (insn);
2028
2029 /* This assert is generally triggered when someone deletes a REG_EQUAL
2030 or REG_EQUIV note by hacking the list manually rather than calling
2031 remove_note. */
2032 gcc_assert (note);
2033
2034 remove_note (insn, note);
2035 }
2036 }
2037
2038 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2039 return 1 if it is found. A simple equality test is used to determine if
2040 NODE matches. */
2041
2042 int
2043 in_expr_list_p (const_rtx listp, const_rtx node)
2044 {
2045 const_rtx x;
2046
2047 for (x = listp; x; x = XEXP (x, 1))
2048 if (node == XEXP (x, 0))
2049 return 1;
2050
2051 return 0;
2052 }
2053
2054 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2055 remove that entry from the list if it is found.
2056
2057 A simple equality test is used to determine if NODE matches. */
2058
2059 void
2060 remove_node_from_expr_list (const_rtx node, rtx *listp)
2061 {
2062 rtx temp = *listp;
2063 rtx prev = NULL_RTX;
2064
2065 while (temp)
2066 {
2067 if (node == XEXP (temp, 0))
2068 {
2069 /* Splice the node out of the list. */
2070 if (prev)
2071 XEXP (prev, 1) = XEXP (temp, 1);
2072 else
2073 *listp = XEXP (temp, 1);
2074
2075 return;
2076 }
2077
2078 prev = temp;
2079 temp = XEXP (temp, 1);
2080 }
2081 }
2082 \f
2083 /* Nonzero if X contains any volatile instructions. These are instructions
2084 which may cause unpredictable machine state, and thus no
2085 instructions or register uses should be moved or combined across them.
2086 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2087
2088 int
2089 volatile_insn_p (const_rtx x)
2090 {
2091 const RTX_CODE code = GET_CODE (x);
2092 switch (code)
2093 {
2094 case LABEL_REF:
2095 case SYMBOL_REF:
2096 case CONST:
2097 CASE_CONST_ANY:
2098 case CC0:
2099 case PC:
2100 case REG:
2101 case SCRATCH:
2102 case CLOBBER:
2103 case ADDR_VEC:
2104 case ADDR_DIFF_VEC:
2105 case CALL:
2106 case MEM:
2107 return 0;
2108
2109 case UNSPEC_VOLATILE:
2110 return 1;
2111
2112 case ASM_INPUT:
2113 case ASM_OPERANDS:
2114 if (MEM_VOLATILE_P (x))
2115 return 1;
2116
2117 default:
2118 break;
2119 }
2120
2121 /* Recursively scan the operands of this expression. */
2122
2123 {
2124 const char *const fmt = GET_RTX_FORMAT (code);
2125 int i;
2126
2127 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2128 {
2129 if (fmt[i] == 'e')
2130 {
2131 if (volatile_insn_p (XEXP (x, i)))
2132 return 1;
2133 }
2134 else if (fmt[i] == 'E')
2135 {
2136 int j;
2137 for (j = 0; j < XVECLEN (x, i); j++)
2138 if (volatile_insn_p (XVECEXP (x, i, j)))
2139 return 1;
2140 }
2141 }
2142 }
2143 return 0;
2144 }
2145
2146 /* Nonzero if X contains any volatile memory references,
2147    UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
2148
2149 int
2150 volatile_refs_p (const_rtx x)
2151 {
2152 const RTX_CODE code = GET_CODE (x);
2153 switch (code)
2154 {
2155 case LABEL_REF:
2156 case SYMBOL_REF:
2157 case CONST:
2158 CASE_CONST_ANY:
2159 case CC0:
2160 case PC:
2161 case REG:
2162 case SCRATCH:
2163 case CLOBBER:
2164 case ADDR_VEC:
2165 case ADDR_DIFF_VEC:
2166 return 0;
2167
2168 case UNSPEC_VOLATILE:
2169 return 1;
2170
2171 case MEM:
2172 case ASM_INPUT:
2173 case ASM_OPERANDS:
2174 if (MEM_VOLATILE_P (x))
2175 return 1;
2176
2177 default:
2178 break;
2179 }
2180
2181 /* Recursively scan the operands of this expression. */
2182
2183 {
2184 const char *const fmt = GET_RTX_FORMAT (code);
2185 int i;
2186
2187 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2188 {
2189 if (fmt[i] == 'e')
2190 {
2191 if (volatile_refs_p (XEXP (x, i)))
2192 return 1;
2193 }
2194 else if (fmt[i] == 'E')
2195 {
2196 int j;
2197 for (j = 0; j < XVECLEN (x, i); j++)
2198 if (volatile_refs_p (XVECEXP (x, i, j)))
2199 return 1;
2200 }
2201 }
2202 }
2203 return 0;
2204 }
2205
2206 /* Similar to above, except that it also rejects register pre- and post-
2207 incrementing. */
2208
2209 int
2210 side_effects_p (const_rtx x)
2211 {
2212 const RTX_CODE code = GET_CODE (x);
2213 switch (code)
2214 {
2215 case LABEL_REF:
2216 case SYMBOL_REF:
2217 case CONST:
2218 CASE_CONST_ANY:
2219 case CC0:
2220 case PC:
2221 case REG:
2222 case SCRATCH:
2223 case ADDR_VEC:
2224 case ADDR_DIFF_VEC:
2225 case VAR_LOCATION:
2226 return 0;
2227
2228 case CLOBBER:
2229 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2230 when some combination can't be done. If we see one, don't think
2231 that we can simplify the expression. */
2232 return (GET_MODE (x) != VOIDmode);
2233
2234 case PRE_INC:
2235 case PRE_DEC:
2236 case POST_INC:
2237 case POST_DEC:
2238 case PRE_MODIFY:
2239 case POST_MODIFY:
2240 case CALL:
2241 case UNSPEC_VOLATILE:
2242 return 1;
2243
2244 case MEM:
2245 case ASM_INPUT:
2246 case ASM_OPERANDS:
2247 if (MEM_VOLATILE_P (x))
2248 return 1;
2249
2250 default:
2251 break;
2252 }
2253
2254 /* Recursively scan the operands of this expression. */
2255
2256 {
2257 const char *fmt = GET_RTX_FORMAT (code);
2258 int i;
2259
2260 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2261 {
2262 if (fmt[i] == 'e')
2263 {
2264 if (side_effects_p (XEXP (x, i)))
2265 return 1;
2266 }
2267 else if (fmt[i] == 'E')
2268 {
2269 int j;
2270 for (j = 0; j < XVECLEN (x, i); j++)
2271 if (side_effects_p (XVECEXP (x, i, j)))
2272 return 1;
2273 }
2274 }
2275 }
2276 return 0;
2277 }
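
/* To contrast the three predicates above on some hypothetical RTL
   (volatile MEMs print with the /v flag):

     expression                   volatile_insn_p  volatile_refs_p  side_effects_p
     (mem/v:SI (reg:SI 100))             0                1                1
     (post_inc:SI (reg:SI 100))          0                0                1
     (unspec_volatile [...] 0)           1                1                1  */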
2278 \f
2279 /* Return nonzero if evaluating rtx X might cause a trap.
2280    FLAGS controls how to consider MEMs.  A nonzero value means the context
2281 of the access may have changed from the original, such that the
2282 address may have become invalid. */
2283
2284 int
2285 may_trap_p_1 (const_rtx x, unsigned flags)
2286 {
2287 int i;
2288 enum rtx_code code;
2289 const char *fmt;
2290
2291 /* We make no distinction currently, but this function is part of
2292 the internal target-hooks ABI so we keep the parameter as
2293 "unsigned flags". */
2294 bool code_changed = flags != 0;
2295
2296 if (x == 0)
2297 return 0;
2298 code = GET_CODE (x);
2299 switch (code)
2300 {
2301 /* Handle these cases quickly. */
2302 CASE_CONST_ANY:
2303 case SYMBOL_REF:
2304 case LABEL_REF:
2305 case CONST:
2306 case PC:
2307 case CC0:
2308 case REG:
2309 case SCRATCH:
2310 return 0;
2311
2312 case UNSPEC:
2313 return targetm.unspec_may_trap_p (x, flags);
2314
2315 case UNSPEC_VOLATILE:
2316 case ASM_INPUT:
2317 case TRAP_IF:
2318 return 1;
2319
2320 case ASM_OPERANDS:
2321 return MEM_VOLATILE_P (x);
2322
2323 /* Memory ref can trap unless it's a static var or a stack slot. */
2324 case MEM:
2325 /* Recognize specific pattern of stack checking probes. */
2326 if (flag_stack_check
2327 && MEM_VOLATILE_P (x)
2328 && XEXP (x, 0) == stack_pointer_rtx)
2329 return 1;
2330       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2331             reference; moving it out of context, such as when moving code
2332             during optimization, might cause its address to become invalid.  */
2333 code_changed
2334 || !MEM_NOTRAP_P (x))
2335 {
2336 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2337 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2338 GET_MODE (x), code_changed);
2339 }
2340
2341 return 0;
2342
2343 /* Division by a non-constant might trap. */
2344 case DIV:
2345 case MOD:
2346 case UDIV:
2347 case UMOD:
2348 if (HONOR_SNANS (GET_MODE (x)))
2349 return 1;
2350 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2351 return flag_trapping_math;
2352 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2353 return 1;
2354 break;
2355
2356 case EXPR_LIST:
2357 /* An EXPR_LIST is used to represent a function call. This
2358 certainly may trap. */
2359 return 1;
2360
2361 case GE:
2362 case GT:
2363 case LE:
2364 case LT:
2365 case LTGT:
2366 case COMPARE:
2367 /* Some floating point comparisons may trap. */
2368 if (!flag_trapping_math)
2369 break;
2370 /* ??? There is no machine independent way to check for tests that trap
2371 when COMPARE is used, though many targets do make this distinction.
2372 For instance, sparc uses CCFPE for compares which generate exceptions
2373 and CCFP for compares which do not generate exceptions. */
2374 if (HONOR_NANS (GET_MODE (x)))
2375 return 1;
2376 /* But often the compare has some CC mode, so check operand
2377 modes as well. */
2378 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2379 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2380 return 1;
2381 break;
2382
2383 case EQ:
2384 case NE:
2385 if (HONOR_SNANS (GET_MODE (x)))
2386 return 1;
2387 /* Often comparison is CC mode, so check operand modes. */
2388 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2389 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2390 return 1;
2391 break;
2392
2393 case FIX:
2394 /* Conversion of floating point might trap. */
2395 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2396 return 1;
2397 break;
2398
2399 case NEG:
2400 case ABS:
2401 case SUBREG:
2402 /* These operations don't trap even with floating point. */
2403 break;
2404
2405 default:
2406 /* Any floating arithmetic may trap. */
2407 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2408 return 1;
2409 }
2410
2411 fmt = GET_RTX_FORMAT (code);
2412 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2413 {
2414 if (fmt[i] == 'e')
2415 {
2416 if (may_trap_p_1 (XEXP (x, i), flags))
2417 return 1;
2418 }
2419 else if (fmt[i] == 'E')
2420 {
2421 int j;
2422 for (j = 0; j < XVECLEN (x, i); j++)
2423 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2424 return 1;
2425 }
2426 }
2427 return 0;
2428 }
2429
2430 /* Return nonzero if evaluating rtx X might cause a trap. */
2431
2432 int
2433 may_trap_p (const_rtx x)
2434 {
2435 return may_trap_p_1 (x, 0);
2436 }
2437
2438 /* Same as above, but additionally return nonzero if evaluating rtx X might
2439    cause a fault.  We define a fault for the purpose of this function as an
2440    erroneous execution condition that cannot be encountered during the normal
2441 execution of a valid program; the typical example is an unaligned memory
2442 access on a strict alignment machine. The compiler guarantees that it
2443 doesn't generate code that will fault from a valid program, but this
2444 guarantee doesn't mean anything for individual instructions. Consider
2445 the following example:
2446
2447 struct S { int d; union { char *cp; int *ip; }; };
2448
2449 int foo(struct S *s)
2450 {
2451 if (s->d == 1)
2452 return *s->ip;
2453 else
2454 return *s->cp;
2455 }
2456
2457 on a strict alignment machine. In a valid program, foo will never be
2458 invoked on a structure for which d is equal to 1 and the underlying
2459    unique field of the union is not aligned on a 4-byte boundary, but the
2460 expression *s->ip might cause a fault if considered individually.
2461
2462 At the RTL level, potentially problematic expressions will almost always
2463    satisfy may_trap_p; for example, the above dereference can be emitted as
2464 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2465 However, suppose that foo is inlined in a caller that causes s->cp to
2466 point to a local character variable and guarantees that s->d is not set
2467 to 1; foo may have been effectively translated into pseudo-RTL as:
2468
2469 if ((reg:SI) == 1)
2470 (set (reg:SI) (mem:SI (%fp - 7)))
2471 else
2472 (set (reg:QI) (mem:QI (%fp - 7)))
2473
2474 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2475 memory reference to a stack slot, but it will certainly cause a fault
2476 on a strict alignment machine. */
2477
2478 int
2479 may_trap_or_fault_p (const_rtx x)
2480 {
2481 return may_trap_p_1 (x, 1);
2482 }
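
/* For instance, with -ftrapping-math in effect, may_trap_p gives these
   answers on some hypothetical expressions:

     (div:SI (reg:SI 100) (const_int 2))  -> 0  nonzero constant divisor
     (div:SI (reg:SI 100) (reg:SI 101))   -> 1  divisor might be zero
     (plus:SF (reg:SF 102) (reg:SF 103))  -> 1  FP arithmetic may trap  */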
2483 \f
2484 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2485 i.e., an inequality. */
2486
2487 int
2488 inequality_comparisons_p (const_rtx x)
2489 {
2490 const char *fmt;
2491 int len, i;
2492 const enum rtx_code code = GET_CODE (x);
2493
2494 switch (code)
2495 {
2496 case REG:
2497 case SCRATCH:
2498 case PC:
2499 case CC0:
2500 CASE_CONST_ANY:
2501 case CONST:
2502 case LABEL_REF:
2503 case SYMBOL_REF:
2504 return 0;
2505
2506 case LT:
2507 case LTU:
2508 case GT:
2509 case GTU:
2510 case LE:
2511 case LEU:
2512 case GE:
2513 case GEU:
2514 return 1;
2515
2516 default:
2517 break;
2518 }
2519
2520 len = GET_RTX_LENGTH (code);
2521 fmt = GET_RTX_FORMAT (code);
2522
2523 for (i = 0; i < len; i++)
2524 {
2525 if (fmt[i] == 'e')
2526 {
2527 if (inequality_comparisons_p (XEXP (x, i)))
2528 return 1;
2529 }
2530 else if (fmt[i] == 'E')
2531 {
2532 int j;
2533 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2534 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2535 return 1;
2536 }
2537 }
2538
2539 return 0;
2540 }
2541 \f
2542 /* Replace any occurrence of FROM in X with TO.  The function does
2543    not descend into CONST_DOUBLEs when replacing.
2544
2545 Note that copying is not done so X must not be shared unless all copies
2546 are to be modified. */
2547
2548 rtx
2549 replace_rtx (rtx x, rtx from, rtx to)
2550 {
2551 int i, j;
2552 const char *fmt;
2553
2554 if (x == from)
2555 return to;
2556
2557 /* Allow this function to make replacements in EXPR_LISTs. */
2558 if (x == 0)
2559 return 0;
2560
2561 if (GET_CODE (x) == SUBREG)
2562 {
2563 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2564
2565 if (CONST_INT_P (new_rtx))
2566 {
2567 x = simplify_subreg (GET_MODE (x), new_rtx,
2568 GET_MODE (SUBREG_REG (x)),
2569 SUBREG_BYTE (x));
2570 gcc_assert (x);
2571 }
2572 else
2573 SUBREG_REG (x) = new_rtx;
2574
2575 return x;
2576 }
2577 else if (GET_CODE (x) == ZERO_EXTEND)
2578 {
2579 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2580
2581 if (CONST_INT_P (new_rtx))
2582 {
2583 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2584 new_rtx, GET_MODE (XEXP (x, 0)));
2585 gcc_assert (x);
2586 }
2587 else
2588 XEXP (x, 0) = new_rtx;
2589
2590 return x;
2591 }
2592
2593 fmt = GET_RTX_FORMAT (GET_CODE (x));
2594 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2595 {
2596 if (fmt[i] == 'e')
2597 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2598 else if (fmt[i] == 'E')
2599 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2600 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2601 }
2602
2603 return x;
2604 }
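
/* Usage sketch (hypothetical): substituting one pseudo for another in a
   copied pattern.  Copying first matters because replace_rtx modifies
   its argument in place; OLD_REG and NEW_REG are placeholders.  */
#if 0
  rtx pat = copy_rtx (PATTERN (insn));
  pat = replace_rtx (pat, old_reg, new_reg);
#endif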
2605 \f
2606 /* Replace occurrences of the old label in *X with the new one.
2607 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2608
2609 int
2610 replace_label (rtx *x, void *data)
2611 {
2612 rtx l = *x;
2613 rtx old_label = ((replace_label_data *) data)->r1;
2614 rtx new_label = ((replace_label_data *) data)->r2;
2615 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2616
2617 if (l == NULL_RTX)
2618 return 0;
2619
2620 if (GET_CODE (l) == SYMBOL_REF
2621 && CONSTANT_POOL_ADDRESS_P (l))
2622 {
2623 rtx c = get_pool_constant (l);
2624 if (rtx_referenced_p (old_label, c))
2625 {
2626 rtx new_c, new_l;
2627 replace_label_data *d = (replace_label_data *) data;
2628
2629 /* Create a copy of constant C; replace the label inside
2630 but do not update LABEL_NUSES because uses in constant pool
2631 are not counted. */
2632 new_c = copy_rtx (c);
2633 d->update_label_nuses = false;
2634 for_each_rtx (&new_c, replace_label, data);
2635 d->update_label_nuses = update_label_nuses;
2636
2637 /* Add the new constant NEW_C to constant pool and replace
2638 the old reference to constant by new reference. */
2639 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2640 *x = replace_rtx (l, l, new_l);
2641 }
2642 return 0;
2643 }
2644
2645 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2646 field. This is not handled by for_each_rtx because it doesn't
2647 handle unprinted ('0') fields. */
2648 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2649 JUMP_LABEL (l) = new_label;
2650
2651 if ((GET_CODE (l) == LABEL_REF
2652 || GET_CODE (l) == INSN_LIST)
2653 && XEXP (l, 0) == old_label)
2654 {
2655 XEXP (l, 0) = new_label;
2656 if (update_label_nuses)
2657 {
2658 ++LABEL_NUSES (new_label);
2659 --LABEL_NUSES (old_label);
2660 }
2661 return 0;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /* Return nonzero when *BODY equals X or directly references X, so that
2668    FOR_EACH_RTX stops traversing and returns nonzero too; otherwise
2669    FOR_EACH_RTX continues traversing *BODY.  */
2670
2671 static int
2672 rtx_referenced_p_1 (rtx *body, void *x)
2673 {
2674 rtx y = (rtx) x;
2675
2676 if (*body == NULL_RTX)
2677 return y == NULL_RTX;
2678
2679 /* Return true if a label_ref *BODY refers to label Y. */
2680 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2681 return XEXP (*body, 0) == y;
2682
2683   /* If *BODY is a reference to a pool constant, traverse the constant.  */
2684 if (GET_CODE (*body) == SYMBOL_REF
2685 && CONSTANT_POOL_ADDRESS_P (*body))
2686 return rtx_referenced_p (y, get_pool_constant (*body));
2687
2688 /* By default, compare the RTL expressions. */
2689 return rtx_equal_p (*body, y);
2690 }
2691
2692 /* Return true if X is referenced in BODY. */
2693
2694 int
2695 rtx_referenced_p (rtx x, rtx body)
2696 {
2697 return for_each_rtx (&body, rtx_referenced_p_1, x);
2698 }
2699
2700 /* If INSN is a tablejump, return true and store the label (just before the
2701    jump table) in *LABELP and the jump table in *TABLEP.  Both may be NULL.  */
2702
2703 bool
2704 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2705 {
2706 rtx label, table;
2707
2708 if (!JUMP_P (insn))
2709 return false;
2710
2711 label = JUMP_LABEL (insn);
2712 if (label != NULL_RTX && !ANY_RETURN_P (label)
2713 && (table = next_active_insn (label)) != NULL_RTX
2714 && JUMP_TABLE_DATA_P (table))
2715 {
2716 if (labelp)
2717 *labelp = label;
2718 if (tablep)
2719 *tablep = table;
2720 return true;
2721 }
2722 return false;
2723 }
2724
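/* Usage sketch (hypothetical caller): counting the entries of a jump
   table found through tablejump_p.  */
#if 0
static int
count_table_entries (rtx jump)
{
  rtx table, body;

  if (!tablejump_p (jump, NULL, &table))
    return 0;
  body = PATTERN (table);
  /* An ADDR_DIFF_VEC keeps its label vector in operand 1, an ADDR_VEC
     in operand 0; the same idiom appears in label_is_jump_target_p
     below.  */
  return XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC);
}
#endif
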
2725 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG, a MEM, or
2726    a constant that is not in the constant pool, ignoring the condition
2727    of an IF_THEN_ELSE.  */
2728
2729 static int
2730 computed_jump_p_1 (const_rtx x)
2731 {
2732 const enum rtx_code code = GET_CODE (x);
2733 int i, j;
2734 const char *fmt;
2735
2736 switch (code)
2737 {
2738 case LABEL_REF:
2739 case PC:
2740 return 0;
2741
2742 case CONST:
2743 CASE_CONST_ANY:
2744 case SYMBOL_REF:
2745 case REG:
2746 return 1;
2747
2748 case MEM:
2749 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2750 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2751
2752 case IF_THEN_ELSE:
2753 return (computed_jump_p_1 (XEXP (x, 1))
2754 || computed_jump_p_1 (XEXP (x, 2)));
2755
2756 default:
2757 break;
2758 }
2759
2760 fmt = GET_RTX_FORMAT (code);
2761 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2762 {
2763 if (fmt[i] == 'e'
2764 && computed_jump_p_1 (XEXP (x, i)))
2765 return 1;
2766
2767 else if (fmt[i] == 'E')
2768 for (j = 0; j < XVECLEN (x, i); j++)
2769 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2770 return 1;
2771 }
2772
2773 return 0;
2774 }
2775
2776 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2777
2778 Tablejumps and casesi insns are not considered indirect jumps;
2779 we can recognize them by a (use (label_ref)). */
2780
2781 int
2782 computed_jump_p (const_rtx insn)
2783 {
2784 int i;
2785 if (JUMP_P (insn))
2786 {
2787 rtx pat = PATTERN (insn);
2788
2789 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2790 if (JUMP_LABEL (insn) != NULL)
2791 return 0;
2792
2793 if (GET_CODE (pat) == PARALLEL)
2794 {
2795 int len = XVECLEN (pat, 0);
2796 int has_use_labelref = 0;
2797
2798 for (i = len - 1; i >= 0; i--)
2799 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2800 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2801 == LABEL_REF))
2802 has_use_labelref = 1;
2803
2804 if (! has_use_labelref)
2805 for (i = len - 1; i >= 0; i--)
2806 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2807 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2808 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2809 return 1;
2810 }
2811 else if (GET_CODE (pat) == SET
2812 && SET_DEST (pat) == pc_rtx
2813 && computed_jump_p_1 (SET_SRC (pat)))
2814 return 1;
2815 }
2816 return 0;
2817 }
2818
2819 /* Optimized inner loop of for_each_rtx, trying to avoid useless recursive
2820    calls.  Processes the subexpressions of EXP and passes them to F.  */
2821 static int
2822 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2823 {
2824 int result, i, j;
2825 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2826 rtx *x;
2827
2828 for (; format[n] != '\0'; n++)
2829 {
2830 switch (format[n])
2831 {
2832 case 'e':
2833 /* Call F on X. */
2834 x = &XEXP (exp, n);
2835 result = (*f) (x, data);
2836 if (result == -1)
2837 /* Do not traverse sub-expressions. */
2838 continue;
2839 else if (result != 0)
2840 /* Stop the traversal. */
2841 return result;
2842
2843 if (*x == NULL_RTX)
2844 /* There are no sub-expressions. */
2845 continue;
2846
2847 i = non_rtx_starting_operands[GET_CODE (*x)];
2848 if (i >= 0)
2849 {
2850 result = for_each_rtx_1 (*x, i, f, data);
2851 if (result != 0)
2852 return result;
2853 }
2854 break;
2855
2856 case 'V':
2857 case 'E':
2858 if (XVEC (exp, n) == 0)
2859 continue;
2860 for (j = 0; j < XVECLEN (exp, n); ++j)
2861 {
2862 /* Call F on X. */
2863 x = &XVECEXP (exp, n, j);
2864 result = (*f) (x, data);
2865 if (result == -1)
2866 /* Do not traverse sub-expressions. */
2867 continue;
2868 else if (result != 0)
2869 /* Stop the traversal. */
2870 return result;
2871
2872 if (*x == NULL_RTX)
2873 /* There are no sub-expressions. */
2874 continue;
2875
2876 i = non_rtx_starting_operands[GET_CODE (*x)];
2877 if (i >= 0)
2878 {
2879 result = for_each_rtx_1 (*x, i, f, data);
2880 if (result != 0)
2881 return result;
2882 }
2883 }
2884 break;
2885
2886 default:
2887 /* Nothing to do. */
2888 break;
2889 }
2890 }
2891
2892 return 0;
2893 }
2894
2895 /* Traverse X via depth-first search, calling F for each
2896 sub-expression (including X itself). F is also passed the DATA.
2897 If F returns -1, do not traverse sub-expressions, but continue
2898 traversing the rest of the tree. If F ever returns any other
2899 nonzero value, stop the traversal, and return the value returned
2900 by F. Otherwise, return 0. This function does not traverse inside
2901 tree structure that contains RTX_EXPRs, or into sub-expressions
2902 whose format code is `0' since it is not known whether or not those
2903 codes are actually RTL.
2904
2905 This routine is very general, and could (should?) be used to
2906 implement many of the other routines in this file. */
2907
2908 int
2909 for_each_rtx (rtx *x, rtx_function f, void *data)
2910 {
2911 int result;
2912 int i;
2913
2914 /* Call F on X. */
2915 result = (*f) (x, data);
2916 if (result == -1)
2917 /* Do not traverse sub-expressions. */
2918 return 0;
2919 else if (result != 0)
2920 /* Stop the traversal. */
2921 return result;
2922
2923 if (*x == NULL_RTX)
2924 /* There are no sub-expressions. */
2925 return 0;
2926
2927 i = non_rtx_starting_operands[GET_CODE (*x)];
2928 if (i < 0)
2929 return 0;
2930
2931 return for_each_rtx_1 (*x, i, f, data);
2932 }
2933
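/* Usage sketch (hypothetical): counting hard-register references in an
   expression with for_each_rtx.  Returning 0 from the callback keeps
   the traversal going.  */
#if 0
static int
count_hard_regs_1 (rtx *loc, void *data)
{
  int *count = (int *) data;

  if (*loc != NULL_RTX && REG_P (*loc) && REGNO (*loc) < FIRST_PSEUDO_REGISTER)
    (*count)++;
  return 0;
}

static int
count_hard_regs (rtx x)
{
  int count = 0;

  for_each_rtx (&x, count_hard_regs_1, &count);
  return count;
}
#endif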
2934 \f
2935
2936 /* Data structure that holds the internal state communicated between
2937 for_each_inc_dec, for_each_inc_dec_find_mem and
2938 for_each_inc_dec_find_inc_dec. */
2939
2940 struct for_each_inc_dec_ops {
2941 /* The function to be called for each autoinc operation found. */
2942 for_each_inc_dec_fn fn;
2943 /* The opaque argument to be passed to it. */
2944 void *arg;
2945 /* The MEM we're visiting, if any. */
2946 rtx mem;
2947 };
2948
2949 static int for_each_inc_dec_find_mem (rtx *r, void *d);
2950
2951 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
2952 operands of the equivalent add insn and pass the result to the
2953 operator specified by *D. */
2954
2955 static int
2956 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
2957 {
2958 rtx x = *r;
2959 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
2960
2961 switch (GET_CODE (x))
2962 {
2963 case PRE_INC:
2964 case POST_INC:
2965 {
2966 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2967 rtx r1 = XEXP (x, 0);
2968 rtx c = gen_int_mode (size, GET_MODE (r1));
2969 return data->fn (data->mem, x, r1, r1, c, data->arg);
2970 }
2971
2972 case PRE_DEC:
2973 case POST_DEC:
2974 {
2975 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2976 rtx r1 = XEXP (x, 0);
2977 rtx c = gen_int_mode (-size, GET_MODE (r1));
2978 return data->fn (data->mem, x, r1, r1, c, data->arg);
2979 }
2980
2981 case PRE_MODIFY:
2982 case POST_MODIFY:
2983 {
2984 rtx r1 = XEXP (x, 0);
2985 rtx add = XEXP (x, 1);
2986 return data->fn (data->mem, x, r1, add, NULL, data->arg);
2987 }
2988
2989 case MEM:
2990 {
2991 rtx save = data->mem;
2992 int ret = for_each_inc_dec_find_mem (r, d);
2993 data->mem = save;
2994 return ret;
2995 }
2996
2997 default:
2998 return 0;
2999 }
3000 }
3001
3002 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3003 address, extract the operands of the equivalent add insn and pass
3004 the result to the operator specified by *D. */
3005
3006 static int
3007 for_each_inc_dec_find_mem (rtx *r, void *d)
3008 {
3009 rtx x = *r;
3010 if (x != NULL_RTX && MEM_P (x))
3011 {
3012 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3013 int result;
3014
3015 data->mem = x;
3016
3017 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3018 data);
3019 if (result)
3020 return result;
3021
3022 return -1;
3023 }
3024 return 0;
3025 }
3026
3027 /* Traverse *X looking for MEMs, and for autoinc operations within
3028 them. For each such autoinc operation found, call FN, passing it
3029 the innermost enclosing MEM, the operation itself, the RTX modified
3030 by the operation, two RTXs (the second may be NULL) that, once
3031 added, represent the value to be held by the modified RTX
3032 afterwards, and ARG. FN is to return -1 to skip looking for other
3033 autoinc operations within the visited operation, 0 to continue the
3034 traversal, or any other value to have it returned to the caller of
3035 for_each_inc_dec. */
3036
3037 int
3038 for_each_inc_dec (rtx *x,
3039 for_each_inc_dec_fn fn,
3040 void *arg)
3041 {
3042 struct for_each_inc_dec_ops data;
3043
3044 data.fn = fn;
3045 data.arg = arg;
3046 data.mem = NULL;
3047
3048 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3049 }
3050
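/* Usage sketch (hypothetical): counting the autoinc side effects in an
   insn.  The callback follows for_each_inc_dec_fn: MEM is the
   enclosing memory reference, OP the autoinc rtx itself, DEST the
   register being modified, and SRC plus SRCOFF (which may be NULL)
   sum to its new value.  */
#if 0
static int
note_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
              rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
              rtx srcoff ATTRIBUTE_UNUSED, void *arg)
{
  int *n_autoincs = (int *) arg;

  (*n_autoincs)++;
  return 0;
}

static int
count_autoincs (rtx insn)
{
  int n = 0;

  for_each_inc_dec (&PATTERN (insn), note_autoinc, &n);
  return n;
}
#endif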
3051 \f
3052 /* Searches X for any reference to REGNO, returning the rtx of the
3053 reference found if any. Otherwise, returns NULL_RTX. */
3054
3055 rtx
3056 regno_use_in (unsigned int regno, rtx x)
3057 {
3058 const char *fmt;
3059 int i, j;
3060 rtx tem;
3061
3062 if (REG_P (x) && REGNO (x) == regno)
3063 return x;
3064
3065 fmt = GET_RTX_FORMAT (GET_CODE (x));
3066 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3067 {
3068 if (fmt[i] == 'e')
3069 {
3070 if ((tem = regno_use_in (regno, XEXP (x, i))))
3071 return tem;
3072 }
3073 else if (fmt[i] == 'E')
3074 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3075 	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3076 return tem;
3077 }
3078
3079 return NULL_RTX;
3080 }
3081
3082 /* Return a value indicating whether OP, an operand of a commutative
3083 operation, is preferred as the first or second operand. The higher
3084 the value, the stronger the preference for being the first operand.
3085 We use negative values to indicate a preference for the first operand
3086    We use negative values to indicate a preference for the second operand
3087    and positive values for the first operand.  */
3088 int
3089 commutative_operand_precedence (rtx op)
3090 {
3091 enum rtx_code code = GET_CODE (op);
3092
3093   /* Constants always become the second operand.  Prefer "nice" constants.  */
3094 if (code == CONST_INT)
3095 return -8;
3096 if (code == CONST_DOUBLE)
3097 return -7;
3098 if (code == CONST_FIXED)
3099 return -7;
3100 op = avoid_constant_pool_reference (op);
3101 code = GET_CODE (op);
3102
3103 switch (GET_RTX_CLASS (code))
3104 {
3105 case RTX_CONST_OBJ:
3106 if (code == CONST_INT)
3107 return -6;
3108 if (code == CONST_DOUBLE)
3109 return -5;
3110 if (code == CONST_FIXED)
3111 return -5;
3112 return -4;
3113
3114 case RTX_EXTRA:
3115 /* SUBREGs of objects should come second. */
3116 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3117 return -3;
3118 return 0;
3119
3120 case RTX_OBJ:
3121       /* Complex expressions should come first, so decrease the priority
3122          of objects.  Prefer pointer objects over non-pointer objects.  */
3123 if ((REG_P (op) && REG_POINTER (op))
3124 || (MEM_P (op) && MEM_POINTER (op)))
3125 return -1;
3126 return -2;
3127
3128 case RTX_COMM_ARITH:
3129 /* Prefer operands that are themselves commutative to be first.
3130 This helps to make things linear. In particular,
3131 (and (and (reg) (reg)) (not (reg))) is canonical. */
3132 return 4;
3133
3134 case RTX_BIN_ARITH:
3135 /* If only one operand is a binary expression, it will be the first
3136 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3137 is canonical, although it will usually be further simplified. */
3138 return 2;
3139
3140 case RTX_UNARY:
3141 /* Then prefer NEG and NOT. */
3142 if (code == NEG || code == NOT)
3143 return 1;
3144
3145 default:
3146 return 0;
3147 }
3148 }
3149
3150 /* Return 1 iff it is necessary to swap the operands of a commutative
3151    operation in order to canonicalize the expression.  */
3152
3153 bool
3154 swap_commutative_operands_p (rtx x, rtx y)
3155 {
3156 return (commutative_operand_precedence (x)
3157 < commutative_operand_precedence (y));
3158 }
3159
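/* Usage sketch: the canonicalization idiom built on this predicate, as
   seen for instance in simplify-rtx.c; X and Y are hypothetical
   operands of a commutative operation.  */
#if 0
  if (swap_commutative_operands_p (x, y))
    {
      rtx tem = x;
      x = y;
      y = tem;
    }
#endif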
3160 /* Return 1 if X is an autoincrement side effect and the register is
3161 not the stack pointer. */
3162 int
3163 auto_inc_p (const_rtx x)
3164 {
3165 switch (GET_CODE (x))
3166 {
3167 case PRE_INC:
3168 case POST_INC:
3169 case PRE_DEC:
3170 case POST_DEC:
3171 case PRE_MODIFY:
3172 case POST_MODIFY:
3173 /* There are no REG_INC notes for SP. */
3174 if (XEXP (x, 0) != stack_pointer_rtx)
3175 return 1;
3176 default:
3177 break;
3178 }
3179 return 0;
3180 }
3181
3182 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3183 int
3184 loc_mentioned_in_p (rtx *loc, const_rtx in)
3185 {
3186 enum rtx_code code;
3187 const char *fmt;
3188 int i, j;
3189
3190 if (!in)
3191 return 0;
3192
3193 code = GET_CODE (in);
3194 fmt = GET_RTX_FORMAT (code);
3195 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3196 {
3197 if (fmt[i] == 'e')
3198 {
3199 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3200 return 1;
3201 }
3202 else if (fmt[i] == 'E')
3203 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3204 if (loc == &XVECEXP (in, i, j)
3205 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3206 return 1;
3207 }
3208 return 0;
3209 }
3210
3211 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3212 and SUBREG_BYTE, return the bit offset where the subreg begins
3213 (counting from the least significant bit of the operand). */
3214
3215 unsigned int
3216 subreg_lsb_1 (enum machine_mode outer_mode,
3217 enum machine_mode inner_mode,
3218 unsigned int subreg_byte)
3219 {
3220 unsigned int bitpos;
3221 unsigned int byte;
3222 unsigned int word;
3223
3224 /* A paradoxical subreg begins at bit position 0. */
3225 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3226 return 0;
3227
3228 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3229 /* If the subreg crosses a word boundary ensure that
3230 it also begins and ends on a word boundary. */
3231 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3232 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3233 && (subreg_byte % UNITS_PER_WORD
3234 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3235
3236 if (WORDS_BIG_ENDIAN)
3237 word = (GET_MODE_SIZE (inner_mode)
3238 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3239 else
3240 word = subreg_byte / UNITS_PER_WORD;
3241 bitpos = word * BITS_PER_WORD;
3242
3243 if (BYTES_BIG_ENDIAN)
3244 byte = (GET_MODE_SIZE (inner_mode)
3245 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3246 else
3247 byte = subreg_byte % UNITS_PER_WORD;
3248 bitpos += byte * BITS_PER_UNIT;
3249
3250 return bitpos;
3251 }
3252
3253 /* Given a subreg X, return the bit offset where the subreg begins
3254 (counting from the least significant bit of the reg). */
3255
3256 unsigned int
3257 subreg_lsb (const_rtx x)
3258 {
3259 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3260 SUBREG_BYTE (x));
3261 }
3262
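/* A worked example of the above (assuming 4-byte words): for
   (subreg:QI (reg:SI x) 3), the subreg starts at bit 24 when neither
   bytes nor words are big-endian (byte 3 is the most significant
   byte), and at bit 0 when both are big-endian (byte 3 is then the
   least significant byte).  */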
3263 /* Fill in information about a subreg of a hard register.
3264 xregno - A regno of an inner hard subreg_reg (or what will become one).
3265 xmode - The mode of xregno.
3266 offset - The byte offset.
3267 ymode - The mode of a top level SUBREG (or what may become one).
3268 info - Pointer to structure to fill in. */
3269 void
3270 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3271 unsigned int offset, enum machine_mode ymode,
3272 struct subreg_info *info)
3273 {
3274 int nregs_xmode, nregs_ymode;
3275 int mode_multiple, nregs_multiple;
3276 int offset_adj, y_offset, y_offset_adj;
3277 int regsize_xmode, regsize_ymode;
3278 bool rknown;
3279
3280 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3281
3282 rknown = false;
3283
3284 /* If there are holes in a non-scalar mode in registers, we expect
3285 that it is made up of its units concatenated together. */
3286 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3287 {
3288 enum machine_mode xmode_unit;
3289
3290 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3291 if (GET_MODE_INNER (xmode) == VOIDmode)
3292 xmode_unit = xmode;
3293 else
3294 xmode_unit = GET_MODE_INNER (xmode);
3295 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3296 gcc_assert (nregs_xmode
3297 == (GET_MODE_NUNITS (xmode)
3298 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3299 gcc_assert (hard_regno_nregs[xregno][xmode]
3300 == (hard_regno_nregs[xregno][xmode_unit]
3301 * GET_MODE_NUNITS (xmode)));
3302
3303 /* You can only ask for a SUBREG of a value with holes in the middle
3304 if you don't cross the holes. (Such a SUBREG should be done by
3305 picking a different register class, or doing it in memory if
3306 necessary.) An example of a value with holes is XCmode on 32-bit
3307 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3308 3 for each part, but in memory it's two 128-bit parts.
3309 Padding is assumed to be at the end (not necessarily the 'high part')
3310 of each unit. */
3311 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3312 < GET_MODE_NUNITS (xmode))
3313 && (offset / GET_MODE_SIZE (xmode_unit)
3314 != ((offset + GET_MODE_SIZE (ymode) - 1)
3315 / GET_MODE_SIZE (xmode_unit))))
3316 {
3317 info->representable_p = false;
3318 rknown = true;
3319 }
3320 }
3321 else
3322 nregs_xmode = hard_regno_nregs[xregno][xmode];
3323
3324 nregs_ymode = hard_regno_nregs[xregno][ymode];
3325
3326 /* Paradoxical subregs are otherwise valid. */
3327 if (!rknown
3328 && offset == 0
3329 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3330 {
3331 info->representable_p = true;
3332 /* If this is a big endian paradoxical subreg, which uses more
3333 actual hard registers than the original register, we must
3334 return a negative offset so that we find the proper highpart
3335 of the register. */
3336 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3337 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3338 info->offset = nregs_xmode - nregs_ymode;
3339 else
3340 info->offset = 0;
3341 info->nregs = nregs_ymode;
3342 return;
3343 }
3344
3345 /* If registers store different numbers of bits in the different
3346 modes, we cannot generally form this subreg. */
3347 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3348 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3349 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3350 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3351 {
3352 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3353 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3354 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3355 {
3356 info->representable_p = false;
3357 info->nregs
3358 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3359 info->offset = offset / regsize_xmode;
3360 return;
3361 }
3362 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3363 {
3364 info->representable_p = false;
3365 info->nregs
3366 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3367 info->offset = offset / regsize_xmode;
3368 return;
3369 }
3370 }
3371
3372 /* Lowpart subregs are otherwise valid. */
3373 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3374 {
3375 info->representable_p = true;
3376 rknown = true;
3377
3378 if (offset == 0 || nregs_xmode == nregs_ymode)
3379 {
3380 info->offset = 0;
3381 info->nregs = nregs_ymode;
3382 return;
3383 }
3384 }
3385
3386 /* This should always pass, otherwise we don't know how to verify
3387 the constraint. These conditions may be relaxed but
3388 subreg_regno_offset would need to be redesigned. */
3389 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3390 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3391
3392 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3393 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3394 {
3395 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3396 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3397 HOST_WIDE_INT off_low = offset & (ysize - 1);
3398 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3399 offset = (xsize - ysize - off_high) | off_low;
3400 }
3401 /* The XMODE value can be seen as a vector of NREGS_XMODE
3402      values.  The subreg must represent a lowpart of a given field.
3403 Compute what field it is. */
3404 offset_adj = offset;
3405 offset_adj -= subreg_lowpart_offset (ymode,
3406 mode_for_size (GET_MODE_BITSIZE (xmode)
3407 / nregs_xmode,
3408 MODE_INT, 0));
3409
3410 /* Size of ymode must not be greater than the size of xmode. */
3411 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3412 gcc_assert (mode_multiple != 0);
3413
3414 y_offset = offset / GET_MODE_SIZE (ymode);
3415 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3416 nregs_multiple = nregs_xmode / nregs_ymode;
3417
3418 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3419 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3420
3421 if (!rknown)
3422 {
3423 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3424 rknown = true;
3425 }
3426 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3427 info->nregs = nregs_ymode;
3428 }
3429
3430 /* This function returns the regno offset of a subreg expression.
3431 xregno - A regno of an inner hard subreg_reg (or what will become one).
3432 xmode - The mode of xregno.
3433 offset - The byte offset.
3434 ymode - The mode of a top level SUBREG (or what may become one).
3435 RETURN - The regno offset which would be used. */
3436 unsigned int
3437 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3438 unsigned int offset, enum machine_mode ymode)
3439 {
3440 struct subreg_info info;
3441 subreg_get_info (xregno, xmode, offset, ymode, &info);
3442 return info.offset;
3443 }
3444
3445 /* This function returns true when the offset is representable via
3446 subreg_offset in the given regno.
3447 xregno - A regno of an inner hard subreg_reg (or what will become one).
3448 xmode - The mode of xregno.
3449 offset - The byte offset.
3450 ymode - The mode of a top level SUBREG (or what may become one).
3451 RETURN - Whether the offset is representable. */
3452 bool
3453 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3454 unsigned int offset, enum machine_mode ymode)
3455 {
3456 struct subreg_info info;
3457 subreg_get_info (xregno, xmode, offset, ymode, &info);
3458 return info.representable_p;
3459 }
3460
3461 /* Return the number of a YMODE register to which
3462
3463 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3464
3465 can be simplified. Return -1 if the subreg can't be simplified.
3466
3467 XREGNO is a hard register number. */
3468
3469 int
3470 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3471 unsigned int offset, enum machine_mode ymode)
3472 {
3473 struct subreg_info info;
3474 unsigned int yregno;
3475
3476 #ifdef CANNOT_CHANGE_MODE_CLASS
3477 /* Give the backend a chance to disallow the mode change. */
3478 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3479 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3480 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3481 /* We can use mode change in LRA for some transformations. */
3482 && ! lra_in_progress)
3483 return -1;
3484 #endif
3485
3486 /* We shouldn't simplify stack-related registers. */
3487 if ((!reload_completed || frame_pointer_needed)
3488 && xregno == FRAME_POINTER_REGNUM)
3489 return -1;
3490
3491 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3492 && xregno == ARG_POINTER_REGNUM)
3493 return -1;
3494
3495 if (xregno == STACK_POINTER_REGNUM
3496       /* We should convert the hard stack register in LRA when it is
3497          possible.  */
3498 && ! lra_in_progress)
3499 return -1;
3500
3501 /* Try to get the register offset. */
3502 subreg_get_info (xregno, xmode, offset, ymode, &info);
3503 if (!info.representable_p)
3504 return -1;
3505
3506 /* Make sure that the offsetted register value is in range. */
3507 yregno = xregno + info.offset;
3508 if (!HARD_REGISTER_NUM_P (yregno))
3509 return -1;
3510
3511 /* See whether (reg:YMODE YREGNO) is valid.
3512
3513 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3514 This is a kludge to work around how complex FP arguments are passed
3515 on IA-64 and should be fixed. See PR target/49226. */
3516 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3517 && HARD_REGNO_MODE_OK (xregno, xmode))
3518 return -1;
3519
3520 return (int) yregno;
3521 }
3522
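/* A worked example of the above (assuming 32-bit hard registers and
   !WORDS_BIG_ENDIAN): for (subreg:SI (reg:DI 0) 4), byte offset 4
   selects the high word, subreg_regno_offset returns 1, and
   simplify_subreg_regno yields hard register 1, provided (reg:SI 1)
   is otherwise valid.  */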
3523 /* Return the final regno that a subreg expression refers to. */
3524 unsigned int
3525 subreg_regno (const_rtx x)
3526 {
3527 unsigned int ret;
3528 rtx subreg = SUBREG_REG (x);
3529 int regno = REGNO (subreg);
3530
3531 ret = regno + subreg_regno_offset (regno,
3532 GET_MODE (subreg),
3533 SUBREG_BYTE (x),
3534 GET_MODE (x));
3535 return ret;
3536
3537 }
3538
3539 /* Return the number of registers that a subreg expression refers
3540 to. */
3541 unsigned int
3542 subreg_nregs (const_rtx x)
3543 {
3544 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3545 }
3546
3547 /* Return the number of registers that subreg expression X, whose inner
3548    register has number REGNO, refers to.  This is a copy of subreg_nregs
3549    above, changed so that the regno can be passed in.  */
3550
3551 unsigned int
3552 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3553 {
3554 struct subreg_info info;
3555 rtx subreg = SUBREG_REG (x);
3556
3557 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3558 &info);
3559 return info.nregs;
3560 }
3561
3562
3563 struct parms_set_data
3564 {
3565 int nregs;
3566 HARD_REG_SET regs;
3567 };
3568
3569 /* Helper function for noticing stores to parameter registers. */
3570 static void
3571 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3572 {
3573 struct parms_set_data *const d = (struct parms_set_data *) data;
3574 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3575 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3576 {
3577 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3578 d->nregs--;
3579 }
3580 }
3581
3582 /* Look backward for the first parameter to be loaded.
3583 Note that loads of all parameters will not necessarily be
3584 found if CSE has eliminated some of them (e.g., an argument
3585 to the outer function is passed down as a parameter).
3586 Do not skip BOUNDARY. */
3587 rtx
3588 find_first_parameter_load (rtx call_insn, rtx boundary)
3589 {
3590 struct parms_set_data parm;
3591 rtx p, before, first_set;
3592
3593 /* Since different machines initialize their parameter registers
3594 in different orders, assume nothing. Collect the set of all
3595 parameter registers. */
3596 CLEAR_HARD_REG_SET (parm.regs);
3597 parm.nregs = 0;
3598 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3599 if (GET_CODE (XEXP (p, 0)) == USE
3600 && REG_P (XEXP (XEXP (p, 0), 0)))
3601 {
3602 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3603
3604 /* We only care about registers which can hold function
3605 arguments. */
3606 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3607 continue;
3608
3609 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3610 parm.nregs++;
3611 }
3612 before = call_insn;
3613 first_set = call_insn;
3614
3615 /* Search backward for the first set of a register in this set. */
3616 while (parm.nregs && before != boundary)
3617 {
3618 before = PREV_INSN (before);
3619
3620 /* It is possible that some loads got CSEed from one call to
3621 another. Stop in that case. */
3622 if (CALL_P (before))
3623 break;
3624
3625       /* Our caller must either ensure that we will find all sets
3626          (in case the code has not been optimized yet), or guard against
3627          possible labels by setting BOUNDARY to the preceding
3628          CODE_LABEL.  */
3629 if (LABEL_P (before))
3630 {
3631 gcc_assert (before == boundary);
3632 break;
3633 }
3634
3635 if (INSN_P (before))
3636 {
3637 int nregs_old = parm.nregs;
3638 note_stores (PATTERN (before), parms_set, &parm);
3639 /* If we found something that did not set a parameter reg,
3640 we're done. Do not keep going, as that might result
3641 in hoisting an insn before the setting of a pseudo
3642 that is used by the hoisted insn. */
3643 if (nregs_old != parm.nregs)
3644 first_set = before;
3645 else
3646 break;
3647 }
3648 }
3649 return first_set;
3650 }
3651
3652 /* Return true if we should avoid inserting code between INSN and preceding
3653 call instruction. */
3654
3655 bool
3656 keep_with_call_p (const_rtx insn)
3657 {
3658 rtx set;
3659
3660 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3661 {
3662 if (REG_P (SET_DEST (set))
3663 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3664 && fixed_regs[REGNO (SET_DEST (set))]
3665 && general_operand (SET_SRC (set), VOIDmode))
3666 return true;
3667 if (REG_P (SET_SRC (set))
3668 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3669 && REG_P (SET_DEST (set))
3670 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3671 return true;
3672 /* There may be a stack pop just after the call and before the store
3673 of the return register. Search for the actual store when deciding
3674 if we can break or not. */
3675 if (SET_DEST (set) == stack_pointer_rtx)
3676 {
3677 	  /* This CONST_CAST is okay because next_nonnote_insn does not
3678 	     modify its argument and we only assign the result to a
3679 	     const_rtx variable.  */
3680 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3681 if (i2 && keep_with_call_p (i2))
3682 return true;
3683 }
3684 }
3685 return false;
3686 }
3687
3688 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3689 to non-complex jumps. That is, direct unconditional, conditional,
3690 and tablejumps, but not computed jumps or returns. It also does
3691 not apply to the fallthru case of a conditional jump. */
3692
3693 bool
3694 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3695 {
3696 rtx tmp = JUMP_LABEL (jump_insn);
3697
3698 if (label == tmp)
3699 return true;
3700
3701 if (tablejump_p (jump_insn, NULL, &tmp))
3702 {
3703 rtvec vec = XVEC (PATTERN (tmp),
3704 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3705 int i, veclen = GET_NUM_ELEM (vec);
3706
3707 for (i = 0; i < veclen; ++i)
3708 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3709 return true;
3710 }
3711
3712 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3713 return true;
3714
3715 return false;
3716 }
3717
3718 \f
3719 /* Return an estimate of the cost of computing rtx X.
3720 One use is in cse, to decide which expression to keep in the hash table.
3721 Another is in rtl generation, to pick the cheapest way to multiply.
3722 Other uses like the latter are expected in the future.
3723
3724 X appears as operand OPNO in an expression with code OUTER_CODE.
3725 SPEED specifies whether costs optimized for speed or size should
3726 be returned. */
3727
3728 int
3729 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3730 {
3731 int i, j;
3732 enum rtx_code code;
3733 const char *fmt;
3734 int total;
3735 int factor;
3736
3737 if (x == 0)
3738 return 0;
3739
3740 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3741 many insns, taking N times as long. */
3742 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3743 if (factor == 0)
3744 factor = 1;
3745
3746 /* Compute the default costs of certain things.
3747 Note that targetm.rtx_costs can override the defaults. */
3748
3749 code = GET_CODE (x);
3750 switch (code)
3751 {
3752 case MULT:
3753 /* Multiplication has time-complexity O(N*N), where N is the
3754 number of units (translated from digits) when using
3755 schoolbook long multiplication. */
3756 total = factor * factor * COSTS_N_INSNS (5);
3757 break;
3758 case DIV:
3759 case UDIV:
3760 case MOD:
3761 case UMOD:
3762 /* Similarly, complexity for schoolbook long division. */
3763 total = factor * factor * COSTS_N_INSNS (7);
3764 break;
3765 case USE:
3766 /* Used in combine.c as a marker. */
3767 total = 0;
3768 break;
3769 case SET:
3770 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3771 the mode for the factor. */
3772 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3773 if (factor == 0)
3774 factor = 1;
3775       /* Fall through.  */
3776 default:
3777 total = factor * COSTS_N_INSNS (1);
3778 }
3779
3780 switch (code)
3781 {
3782 case REG:
3783 return 0;
3784
3785 case SUBREG:
3786 total = 0;
3787 /* If we can't tie these modes, make this expensive. The larger
3788 the mode, the more expensive it is. */
3789 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3790 return COSTS_N_INSNS (2 + factor);
3791 break;
3792
3793 default:
3794 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3795 return total;
3796 break;
3797 }
3798
3799 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3800 which is already in total. */
3801
3802 fmt = GET_RTX_FORMAT (code);
3803 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3804 if (fmt[i] == 'e')
3805 total += rtx_cost (XEXP (x, i), code, i, speed);
3806 else if (fmt[i] == 'E')
3807 for (j = 0; j < XVECLEN (x, i); j++)
3808 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3809
3810 return total;
3811 }
3812
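/* Usage sketch (hypothetical): comparing two candidate SET sources by
   cost before substituting one for the other.  OLD_SRC and NEW_SRC
   are placeholders; operand 1 of a SET is the source position.  */
#if 0
  bool speed = optimize_insn_for_speed_p ();

  if (rtx_cost (new_src, SET, 1, speed) < rtx_cost (old_src, SET, 1, speed))
    {
      /* ... prefer NEW_SRC ...  */
    }
#endif
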
3813 /* Fill in the structure C with information about both speed and size rtx
3814 costs for X, which is operand OPNO in an expression with code OUTER. */
3815
3816 void
3817 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3818 struct full_rtx_costs *c)
3819 {
3820 c->speed = rtx_cost (x, outer, opno, true);
3821 c->size = rtx_cost (x, outer, opno, false);
3822 }
3823
3824 \f
3825 /* Return the cost of address expression X.
3826    Expect that X is a properly formed address reference.
3827
3828    The SPEED parameter specifies whether costs optimized for speed
3829    or size should be returned.  */
3830
3831 int
3832 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3833 {
3834   /* We may be asked for the cost of various unusual addresses, such as
3835      operands of a push instruction.  It is not worthwhile to complicate
3836      the target hook with such cases.  */
3837
3838 if (!memory_address_addr_space_p (mode, x, as))
3839 return 1000;
3840
3841 return targetm.address_cost (x, mode, as, speed);
3842 }
3843
3844 /* If the target doesn't override, compute the cost as with arithmetic. */
3845
3846 int
3847 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3848 {
3849 return rtx_cost (x, MEM, 0, speed);
3850 }
3851 \f
3852
3853 unsigned HOST_WIDE_INT
3854 nonzero_bits (const_rtx x, enum machine_mode mode)
3855 {
3856 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3857 }
3858
3859 unsigned int
3860 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3861 {
3862 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3863 }
3864
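/* For example (a sketch, 32-bit target): for
   (zero_extend:SI (reg:QI 101)) examined in SImode, nonzero_bits
   yields 0xff, while num_sign_bit_copies yields 24, since bits 31..8
   are known to be copies of the (zero) sign bit.  */
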
3865 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3866 It avoids exponential behavior in nonzero_bits1 when X has
3867 identical subexpressions on the first or the second level. */
3868
3869 static unsigned HOST_WIDE_INT
3870 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3871 enum machine_mode known_mode,
3872 unsigned HOST_WIDE_INT known_ret)
3873 {
3874 if (x == known_x && mode == known_mode)
3875 return known_ret;
3876
3877 /* Try to find identical subexpressions. If found call
3878 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3879 precomputed value for the subexpression as KNOWN_RET. */
3880
3881 if (ARITHMETIC_P (x))
3882 {
3883 rtx x0 = XEXP (x, 0);
3884 rtx x1 = XEXP (x, 1);
3885
3886 /* Check the first level. */
3887 if (x0 == x1)
3888 return nonzero_bits1 (x, mode, x0, mode,
3889 cached_nonzero_bits (x0, mode, known_x,
3890 known_mode, known_ret));
3891
3892 /* Check the second level. */
3893 if (ARITHMETIC_P (x0)
3894 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3895 return nonzero_bits1 (x, mode, x1, mode,
3896 cached_nonzero_bits (x1, mode, known_x,
3897 known_mode, known_ret));
3898
3899 if (ARITHMETIC_P (x1)
3900 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3901 return nonzero_bits1 (x, mode, x0, mode,
3902 cached_nonzero_bits (x0, mode, known_x,
3903 known_mode, known_ret));
3904 }
3905
3906 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3907 }
3908
3909 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3910 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3911 is less useful. We can't allow both, because that results in exponential
3912 run time recursion. There is a nullstone testcase that triggered
3913 this. This macro avoids accidental uses of num_sign_bit_copies. */
3914 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3915
3916 /* Given an expression, X, compute which bits in X can be nonzero.
3917 We don't care about bits outside of those defined in MODE.
3918
3919    For most X this is simply GET_MODE_MASK (MODE), but if X is
3920 an arithmetic operation, we can do better. */
3921
3922 static unsigned HOST_WIDE_INT
3923 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3924 enum machine_mode known_mode,
3925 unsigned HOST_WIDE_INT known_ret)
3926 {
3927 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3928 unsigned HOST_WIDE_INT inner_nz;
3929 enum rtx_code code;
3930 enum machine_mode inner_mode;
3931 unsigned int mode_width = GET_MODE_PRECISION (mode);
3932
3933 /* For floating-point and vector values, assume all bits are needed. */
3934 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3935 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3936 return nonzero;
3937
3938 /* If X is wider than MODE, use its mode instead. */
3939 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
3940 {
3941 mode = GET_MODE (x);
3942 nonzero = GET_MODE_MASK (mode);
3943 mode_width = GET_MODE_PRECISION (mode);
3944 }
3945
3946 if (mode_width > HOST_BITS_PER_WIDE_INT)
3947 /* Our only callers in this case look for single bit values. So
3948 just return the mode mask. Those tests will then be false. */
3949 return nonzero;
3950
3951 #ifndef WORD_REGISTER_OPERATIONS
3952 /* If MODE is wider than X, but both are a single word for both the host
3953 and target machines, we can compute this from which bits of the
3954 object might be nonzero in its own mode, taking into account the fact
3955 that on many CISC machines, accessing an object in a wider mode
3956 causes the high-order bits to become undefined. So they are
3957 not known to be zero. */
3958
3959 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3960 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
3961 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3962 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
3963 {
3964 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3965 known_x, known_mode, known_ret);
3966 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3967 return nonzero;
3968 }
3969 #endif
3970
3971 code = GET_CODE (x);
3972 switch (code)
3973 {
3974 case REG:
3975 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3976 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3977 all the bits above ptr_mode are known to be zero. */
3978 /* As we do not know which address space the pointer is referring to,
3979 we can do this only if the target does not support different pointer
3980 or address modes depending on the address space. */
3981 if (target_default_pointer_address_modes_p ()
3982 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3983 && REG_POINTER (x))
3984 nonzero &= GET_MODE_MASK (ptr_mode);
3985 #endif
3986
3987 /* Include declared information about alignment of pointers. */
3988 /* ??? We don't properly preserve REG_POINTER changes across
3989 pointer-to-integer casts, so we can't trust it except for
3990 things that we know must be pointers. See execute/960116-1.c. */
3991 if ((x == stack_pointer_rtx
3992 || x == frame_pointer_rtx
3993 || x == arg_pointer_rtx)
3994 && REGNO_POINTER_ALIGN (REGNO (x)))
3995 {
3996 unsigned HOST_WIDE_INT alignment
3997 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3998
3999 #ifdef PUSH_ROUNDING
4000 /* If PUSH_ROUNDING is defined, it is possible for the
4001 stack to be momentarily aligned only to that amount,
4002 so we pick the least alignment. */
4003 if (x == stack_pointer_rtx && PUSH_ARGS)
4004 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4005 alignment);
4006 #endif
4007
4008 nonzero &= ~(alignment - 1);
4009 }
4010
4011 {
4012 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4013 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4014 known_mode, known_ret,
4015 &nonzero_for_hook);
4016
4017 if (new_rtx)
4018 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4019 known_mode, known_ret);
4020
4021 return nonzero_for_hook;
4022 }
4023
4024 case CONST_INT:
4025 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4026 /* If X is negative in MODE, sign-extend the value. */
4027 if (INTVAL (x) > 0
4028 && mode_width < BITS_PER_WORD
4029 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4030 != 0)
4031 return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
4032 #endif
4033
4034 return UINTVAL (x);
4035
4036 case MEM:
4037 #ifdef LOAD_EXTEND_OP
4038 /* On many, if not most, RISC machines, reading a byte from memory
4039 zeros the rest of the register. Noticing that fact saves a lot
4040 of extra zero-extends. */
4041 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4042 nonzero &= GET_MODE_MASK (GET_MODE (x));
4043 #endif
4044 break;
4045
4046 case EQ: case NE:
4047 case UNEQ: case LTGT:
4048 case GT: case GTU: case UNGT:
4049 case LT: case LTU: case UNLT:
4050 case GE: case GEU: case UNGE:
4051 case LE: case LEU: case UNLE:
4052 case UNORDERED: case ORDERED:
4053 /* If this produces an integer result, we know which bits are set.
4054 Code here used to clear bits outside the mode of X, but that is
4055 now done above. */
4056 /* Mind that MODE is the mode the caller wants to look at this
4057 operation in, and not the actual operation mode. We can wind
4058 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4059 that describes the results of a vector compare. */
4060 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4061 && mode_width <= HOST_BITS_PER_WIDE_INT)
4062 nonzero = STORE_FLAG_VALUE;
4063 break;
4064
4065 case NEG:
4066 #if 0
4067 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4068 and num_sign_bit_copies. */
4069 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4070 == GET_MODE_PRECISION (GET_MODE (x)))
4071 nonzero = 1;
4072 #endif
4073
4074 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4075 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4076 break;
4077
4078 case ABS:
4079 #if 0
4080 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4081 and num_sign_bit_copies. */
4082 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4083 == GET_MODE_PRECISION (GET_MODE (x)))
4084 nonzero = 1;
4085 #endif
4086 break;
4087
4088 case TRUNCATE:
4089 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4090 known_x, known_mode, known_ret)
4091 & GET_MODE_MASK (mode));
4092 break;
4093
4094 case ZERO_EXTEND:
4095 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4096 known_x, known_mode, known_ret);
4097 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4098 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4099 break;
4100
4101 case SIGN_EXTEND:
4102 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4103 Otherwise, show that all the bits in the outer mode but not in
4104 the inner mode may be nonzero. */
4105 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4106 known_x, known_mode, known_ret);
4107 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4108 {
4109 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4110 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4111 inner_nz |= (GET_MODE_MASK (mode)
4112 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4113 }
4114
4115 nonzero &= inner_nz;
4116 break;
4117
4118 case AND:
4119 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4120 known_x, known_mode, known_ret)
4121 & cached_nonzero_bits (XEXP (x, 1), mode,
4122 known_x, known_mode, known_ret);
4123 break;
4124
4125 case XOR: case IOR:
4126 case UMIN: case UMAX: case SMIN: case SMAX:
4127 {
4128 unsigned HOST_WIDE_INT nonzero0
4129 = cached_nonzero_bits (XEXP (x, 0), mode,
4130 known_x, known_mode, known_ret);
4131
4132 /* Don't call nonzero_bits a second time if it cannot change
4133 anything. */
4134 if ((nonzero & nonzero0) != nonzero)
4135 nonzero &= nonzero0
4136 | cached_nonzero_bits (XEXP (x, 1), mode,
4137 known_x, known_mode, known_ret);
4138 }
4139 break;
4140
4141 case PLUS: case MINUS:
4142 case MULT:
4143 case DIV: case UDIV:
4144 case MOD: case UMOD:
4145 /* We can apply the rules of arithmetic to compute the number of
4146 high- and low-order zero bits of these operations. We start by
4147 computing the width (position of the highest-order nonzero bit)
4148 and the number of low-order zero bits for each value. */
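/* For example, with nz0 == 0x0c (width0 == 4, low0 == 2) and
nz1 == 0x05 (width1 == 3, low1 == 0), PLUS gives result_width == 5
and result_low == 0, masking nonzero down to 0x1f, while MULT gives
result_width == 7 and result_low == 2, masking it down to 0x7c. */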
4149 {
4150 unsigned HOST_WIDE_INT nz0
4151 = cached_nonzero_bits (XEXP (x, 0), mode,
4152 known_x, known_mode, known_ret);
4153 unsigned HOST_WIDE_INT nz1
4154 = cached_nonzero_bits (XEXP (x, 1), mode,
4155 known_x, known_mode, known_ret);
4156 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4157 int width0 = floor_log2 (nz0) + 1;
4158 int width1 = floor_log2 (nz1) + 1;
4159 int low0 = floor_log2 (nz0 & -nz0);
4160 int low1 = floor_log2 (nz1 & -nz1);
4161 unsigned HOST_WIDE_INT op0_maybe_minusp
4162 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4163 unsigned HOST_WIDE_INT op1_maybe_minusp
4164 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4165 unsigned int result_width = mode_width;
4166 int result_low = 0;
4167
4168 switch (code)
4169 {
4170 case PLUS:
4171 result_width = MAX (width0, width1) + 1;
4172 result_low = MIN (low0, low1);
4173 break;
4174 case MINUS:
4175 result_low = MIN (low0, low1);
4176 break;
4177 case MULT:
4178 result_width = width0 + width1;
4179 result_low = low0 + low1;
4180 break;
4181 case DIV:
4182 if (width1 == 0)
4183 break;
4184 if (!op0_maybe_minusp && !op1_maybe_minusp)
4185 result_width = width0;
4186 break;
4187 case UDIV:
4188 if (width1 == 0)
4189 break;
4190 result_width = width0;
4191 break;
4192 case MOD:
4193 if (width1 == 0)
4194 break;
4195 if (!op0_maybe_minusp && !op1_maybe_minusp)
4196 result_width = MIN (width0, width1);
4197 result_low = MIN (low0, low1);
4198 break;
4199 case UMOD:
4200 if (width1 == 0)
4201 break;
4202 result_width = MIN (width0, width1);
4203 result_low = MIN (low0, low1);
4204 break;
4205 default:
4206 gcc_unreachable ();
4207 }
4208
4209 if (result_width < mode_width)
4210 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4211
4212 if (result_low > 0)
4213 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4214 }
4215 break;
4216
4217 case ZERO_EXTRACT:
4218 if (CONST_INT_P (XEXP (x, 1))
4219 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4220 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4221 break;
4222
4223 case SUBREG:
4224 /* If this is a SUBREG formed for a promoted variable that has
4225 been zero-extended, we know that at least the high-order bits
4226 are zero, though others might be too. */
4227
4228 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4229 nonzero = GET_MODE_MASK (GET_MODE (x))
4230 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4231 known_x, known_mode, known_ret);
4232
4233 inner_mode = GET_MODE (SUBREG_REG (x));
4234 /* If the inner mode is a single word for both the host and target
4235 machines, we can compute this from which bits of the inner
4236 object might be nonzero. */
4237 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4238 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4239 {
4240 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4241 known_x, known_mode, known_ret);
4242
4243 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4244 /* If this is a typical RISC machine, we only have to worry
4245 about the way loads are extended. */
4246 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4247 ? val_signbit_known_set_p (inner_mode, nonzero)
4248 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4249 || !MEM_P (SUBREG_REG (x)))
4250 #endif
4251 {
4252 /* On many CISC machines, accessing an object in a wider mode
4253 causes the high-order bits to become undefined. So they are
4254 not known to be zero. */
4255 if (GET_MODE_PRECISION (GET_MODE (x))
4256 > GET_MODE_PRECISION (inner_mode))
4257 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4258 & ~GET_MODE_MASK (inner_mode));
4259 }
4260 }
4261 break;
4262
4263 case ASHIFTRT:
4264 case LSHIFTRT:
4265 case ASHIFT:
4266 case ROTATE:
4267 /* The nonzero bits are in two classes: any bits within MODE
4268 that aren't in GET_MODE (x) are always significant. The rest of the
4269 nonzero bits are those that are significant in the operand of
4270 the shift when shifted the appropriate number of bits. This
4271 shows that high-order bits are cleared by the right shift and
4272 low-order bits by left shifts. */
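/* For example, shifting an SImode value with op_nonzero == 0xff00
right logically by 4 leaves inner == 0xff0, while shifting it left
by 4 leaves inner == 0xff000; either way the vacated bit positions
are known to be zero. */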
4273 if (CONST_INT_P (XEXP (x, 1))
4274 && INTVAL (XEXP (x, 1)) >= 0
4275 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4276 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4277 {
4278 enum machine_mode inner_mode = GET_MODE (x);
4279 unsigned int width = GET_MODE_PRECISION (inner_mode);
4280 int count = INTVAL (XEXP (x, 1));
4281 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4282 unsigned HOST_WIDE_INT op_nonzero
4283 = cached_nonzero_bits (XEXP (x, 0), mode,
4284 known_x, known_mode, known_ret);
4285 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4286 unsigned HOST_WIDE_INT outer = 0;
4287
4288 if (mode_width > width)
4289 outer = (op_nonzero & nonzero & ~mode_mask);
4290
4291 if (code == LSHIFTRT)
4292 inner >>= count;
4293 else if (code == ASHIFTRT)
4294 {
4295 inner >>= count;
4296
4297 /* If the sign bit may have been nonzero before the shift, we
4298 need to mark all the places it could have been copied to
4299 by the shift as possibly nonzero. */
4300 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4301 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4302 << (width - count);
4303 }
4304 else if (code == ASHIFT)
4305 inner <<= count;
4306 else
4307 inner = ((inner << (count % width)
4308 | (inner >> (width - (count % width)))) & mode_mask);
4309
4310 nonzero &= (outer | inner);
4311 }
4312 break;
4313
4314 case FFS:
4315 case POPCOUNT:
4316 /* This is at most the number of bits in the mode. */
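/* For example, in SImode mode_width is 32, so nonzero becomes
(2 << floor_log2 (32)) - 1 == 63, which covers every possible
count from 0 through 32. */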
4317 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4318 break;
4319
4320 case CLZ:
4321 /* If CLZ has a known value at zero, then the nonzero bits are
4322 that value, plus the number of bits in the mode minus one. */
4323 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4324 nonzero
4325 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4326 else
4327 nonzero = -1;
4328 break;
4329
4330 case CTZ:
4331 /* If CTZ has a known value at zero, then the nonzero bits are
4332 that value, plus the number of bits in the mode minus one. */
4333 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4334 nonzero
4335 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4336 else
4337 nonzero = -1;
4338 break;
4339
4340 case CLRSB:
4341 /* This is at most the number of bits in the mode minus 1. */
4342 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4343 break;
4344
4345 case PARITY:
4346 nonzero = 1;
4347 break;
4348
4349 case IF_THEN_ELSE:
4350 {
4351 unsigned HOST_WIDE_INT nonzero_true
4352 = cached_nonzero_bits (XEXP (x, 1), mode,
4353 known_x, known_mode, known_ret);
4354
4355 /* Don't call nonzero_bits a second time if it cannot change
4356 anything. */
4357 if ((nonzero & nonzero_true) != nonzero)
4358 nonzero &= nonzero_true
4359 | cached_nonzero_bits (XEXP (x, 2), mode,
4360 known_x, known_mode, known_ret);
4361 }
4362 break;
4363
4364 default:
4365 break;
4366 }
4367
4368 return nonzero;
4369 }
4370
4371 /* See the macro definition above. */
4372 #undef cached_num_sign_bit_copies
4373
4374 \f
4375 /* The function cached_num_sign_bit_copies is a wrapper around
4376 num_sign_bit_copies1. It avoids exponential behavior in
4377 num_sign_bit_copies1 when X has identical subexpressions on the
4378 first or the second level. */
4379
4380 static unsigned int
4381 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4382 enum machine_mode known_mode,
4383 unsigned int known_ret)
4384 {
4385 if (x == known_x && mode == known_mode)
4386 return known_ret;
4387
4388 /* Try to find identical subexpressions. If found, call
4389 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4390 the precomputed value for the subexpression as KNOWN_RET. */
4391
4392 if (ARITHMETIC_P (x))
4393 {
4394 rtx x0 = XEXP (x, 0);
4395 rtx x1 = XEXP (x, 1);
4396
4397 /* Check the first level. */
4398 if (x0 == x1)
4399 return
4400 num_sign_bit_copies1 (x, mode, x0, mode,
4401 cached_num_sign_bit_copies (x0, mode, known_x,
4402 known_mode,
4403 known_ret));
4404
4405 /* Check the second level. */
4406 if (ARITHMETIC_P (x0)
4407 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4408 return
4409 num_sign_bit_copies1 (x, mode, x1, mode,
4410 cached_num_sign_bit_copies (x1, mode, known_x,
4411 known_mode,
4412 known_ret));
4413
4414 if (ARITHMETIC_P (x1)
4415 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4416 return
4417 num_sign_bit_copies1 (x, mode, x0, mode,
4418 cached_num_sign_bit_copies (x0, mode, known_x,
4419 known_mode,
4420 known_ret));
4421 }
4422
4423 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4424 }
4425
4426 /* Return the number of bits at the high-order end of X that are known to
4427 be equal to the sign bit. X will be used in mode MODE; if MODE is
4428 VOIDmode, X will be used in its own mode. The returned value will always
4429 be between 1 and the number of bits in MODE. */
4430
4431 static unsigned int
4432 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4433 enum machine_mode known_mode,
4434 unsigned int known_ret)
4435 {
4436 enum rtx_code code = GET_CODE (x);
4437 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4438 int num0, num1, result;
4439 unsigned HOST_WIDE_INT nonzero;
4440
4441 /* If we weren't given a mode, use the mode of X. If the mode is still
4442 VOIDmode, we don't know anything. Likewise if one of the modes is
4443 floating-point. */
4444
4445 if (mode == VOIDmode)
4446 mode = GET_MODE (x);
4447
4448 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4449 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4450 return 1;
4451
4452 /* For a smaller object, just ignore the high bits. */
4453 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4454 {
4455 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4456 known_x, known_mode, known_ret);
4457 return MAX (1,
4458 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4459 }
4460
4461 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4462 {
4463 #ifndef WORD_REGISTER_OPERATIONS
4464 /* If this machine does not do all register operations on the entire
4465 register and MODE is wider than the mode of X, we can say nothing
4466 at all about the high-order bits. */
4467 return 1;
4468 #else
4469 /* Likewise on machines that do, if the mode of the object is smaller
4470 than a word and loads of that size don't sign extend, we can say
4471 nothing about the high order bits. */
4472 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4473 #ifdef LOAD_EXTEND_OP
4474 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4475 #endif
4476 )
4477 return 1;
4478 #endif
4479 }
4480
4481 switch (code)
4482 {
4483 case REG:
4484
4485 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4486 /* If pointers extend signed and this is a pointer in Pmode, say that
4487 all the bits above ptr_mode are known to be sign bit copies. */
4488 /* As we do not know which address space the pointer is referring to,
4489 we can do this only if the target does not support different pointer
4490 or address modes depending on the address space. */
4491 if (target_default_pointer_address_modes_p ()
4492 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4493 && mode == Pmode && REG_POINTER (x))
4494 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4495 #endif
4496
4497 {
4498 unsigned int copies_for_hook = 1, copies = 1;
4499 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4500 known_mode, known_ret,
4501 &copies_for_hook);
4502
4503 if (new_rtx)
4504 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4505 known_mode, known_ret);
4506
4507 if (copies > 1 || copies_for_hook > 1)
4508 return MAX (copies, copies_for_hook);
4509
4510 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4511 }
4512 break;
4513
4514 case MEM:
4515 #ifdef LOAD_EXTEND_OP
4516 /* Some RISC machines sign-extend all loads of smaller than a word. */
4517 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4518 return MAX (1, ((int) bitwidth
4519 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4520 #endif
4521 break;
4522
4523 case CONST_INT:
4524 /* If the constant is negative, take its 1's complement and remask.
4525 Then see how many zero bits we have. */
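/* For example, for x == -4 in SImode, nonzero == 0xfffffffc has the
sign bit set, so it is complemented and remasked to 3, and we return
32 - floor_log2 (3) - 1 == 30: the top 30 bits of -4 are copies of
the sign bit. */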
4526 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4527 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4528 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4529 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4530
4531 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4532
4533 case SUBREG:
4534 /* If this is a SUBREG for a promoted object that is sign-extended
4535 and we are looking at it in a wider mode, we know that at least the
4536 high-order bits are sign bit copies. */
4537
4538 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4539 {
4540 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4541 known_x, known_mode, known_ret);
4542 return MAX ((int) bitwidth
4543 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4544 num0);
4545 }
4546
4547 /* For a smaller object, just ignore the high bits. */
4548 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4549 {
4550 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4551 known_x, known_mode, known_ret);
4552 return MAX (1, (num0
4553 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4554 - bitwidth)));
4555 }
4556
4557 #ifdef WORD_REGISTER_OPERATIONS
4558 #ifdef LOAD_EXTEND_OP
4559 /* For paradoxical SUBREGs on machines where all register operations
4560 affect the entire register, just look inside. Note that we are
4561 passing MODE to the recursive call, so the number of sign bit copies
4562 will remain relative to that mode, not the inner mode. */
4563
4564 /* This works only if loads sign extend. Otherwise, if we get a
4565 reload for the inner part, it may be loaded from the stack, and
4566 then we lose all sign bit copies that existed before the store
4567 to the stack. */
4568
4569 if (paradoxical_subreg_p (x)
4570 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4571 && MEM_P (SUBREG_REG (x)))
4572 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4573 known_x, known_mode, known_ret);
4574 #endif
4575 #endif
4576 break;
4577
4578 case SIGN_EXTRACT:
4579 if (CONST_INT_P (XEXP (x, 1)))
4580 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4581 break;
4582
4583 case SIGN_EXTEND:
4584 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4585 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4586 known_x, known_mode, known_ret));
4587
4588 case TRUNCATE:
4589 /* For a smaller object, just ignore the high bits. */
4590 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4591 known_x, known_mode, known_ret);
4592 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4593 - bitwidth)));
4594
4595 case NOT:
4596 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4597 known_x, known_mode, known_ret);
4598
4599 case ROTATE: case ROTATERT:
4600 /* If we are rotating left by a number of bits less than the number
4601 of sign bit copies, we can just subtract that amount from the
4602 number. */
4603 if (CONST_INT_P (XEXP (x, 1))
4604 && INTVAL (XEXP (x, 1)) >= 0
4605 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4606 {
4607 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4608 known_x, known_mode, known_ret);
4609 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4610 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4611 }
4612 break;
4613
4614 case NEG:
4615 /* In general, this subtracts one sign bit copy. But if the value
4616 is known to be positive, the number of sign bit copies is the
4617 same as that of the input. Finally, if the input has just one bit
4618 that might be nonzero, all the bits are copies of the sign bit. */
4619 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4620 known_x, known_mode, known_ret);
4621 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4622 return num0 > 1 ? num0 - 1 : 1;
4623
4624 nonzero = nonzero_bits (XEXP (x, 0), mode);
4625 if (nonzero == 1)
4626 return bitwidth;
4627
4628 if (num0 > 1
4629 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4630 num0--;
4631
4632 return num0;
4633
4634 case IOR: case AND: case XOR:
4635 case SMIN: case SMAX: case UMIN: case UMAX:
4636 /* Logical operations will preserve the number of sign-bit copies.
4637 MIN and MAX operations always return one of the operands. */
4638 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4639 known_x, known_mode, known_ret);
4640 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4641 known_x, known_mode, known_ret);
4642
4643 /* If num1 is clearing some of the top bits then regardless of
4644 the other term, we are guaranteed to have at least that many
4645 high-order zero bits. */
4646 if (code == AND
4647 && num1 > 1
4648 && bitwidth <= HOST_BITS_PER_WIDE_INT
4649 && CONST_INT_P (XEXP (x, 1))
4650 && (UINTVAL (XEXP (x, 1))
4651 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4652 return num1;
4653
4654 /* Similarly for IOR when setting high-order bits. */
4655 if (code == IOR
4656 && num1 > 1
4657 && bitwidth <= HOST_BITS_PER_WIDE_INT
4658 && CONST_INT_P (XEXP (x, 1))
4659 && (UINTVAL (XEXP (x, 1))
4660 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4661 return num1;
4662
4663 return MIN (num0, num1);
4664
4665 case PLUS: case MINUS:
4666 /* For addition and subtraction, we can have a 1-bit carry. However,
4667 if we are subtracting 1 from a positive number, there will not
4668 be such a carry. Furthermore, if the positive number is known to
4669 be 0 or 1, we know the result is either -1 or 0. */
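/* For example, adding two SImode values that each have 10 sign bit
copies can leave as few as 9, because a carry can ripple into the
lowest copied bit; hence the MIN (num0, num1) - 1 below. */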
4670
4671 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4672 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4673 {
4674 nonzero = nonzero_bits (XEXP (x, 0), mode);
4675 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4676 return (nonzero == 1 || nonzero == 0 ? bitwidth
4677 : bitwidth - floor_log2 (nonzero) - 1);
4678 }
4679
4680 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4681 known_x, known_mode, known_ret);
4682 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4683 known_x, known_mode, known_ret);
4684 result = MAX (1, MIN (num0, num1) - 1);
4685
4686 return result;
4687
4688 case MULT:
4689 /* The number of bits of the product is the sum of the number of
4690 bits of both terms. However, unless one of the terms is known
4691 to be positive, we must allow for an additional bit since negating
4692 a negative number can remove one sign bit copy. */
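/* For example, two SImode operands with num0 == 20 and num1 == 15
sign bit copies give result == 32 - 12 - 17 == 3 before the possible
extra decrement for negative operands below. */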
4693
4694 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4695 known_x, known_mode, known_ret);
4696 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4697 known_x, known_mode, known_ret);
4698
4699 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4700 if (result > 0
4701 && (bitwidth > HOST_BITS_PER_WIDE_INT
4702 || (((nonzero_bits (XEXP (x, 0), mode)
4703 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4704 && ((nonzero_bits (XEXP (x, 1), mode)
4705 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4706 != 0))))
4707 result--;
4708
4709 return MAX (1, result);
4710
4711 case UDIV:
4712 /* The result must be <= the first operand. If the first operand
4713 has the high bit set, we know nothing about the number of sign
4714 bit copies. */
4715 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4716 return 1;
4717 else if ((nonzero_bits (XEXP (x, 0), mode)
4718 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4719 return 1;
4720 else
4721 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4722 known_x, known_mode, known_ret);
4723
4724 case UMOD:
4725 /* The result must be <= the second operand. If the second operand
4726 has (or just might have) the high bit set, we know nothing about
4727 the number of sign bit copies. */
4728 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4729 return 1;
4730 else if ((nonzero_bits (XEXP (x, 1), mode)
4731 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4732 return 1;
4733 else
4734 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4735 known_x, known_mode, known_ret);
4736
4737 case DIV:
4738 /* Similar to unsigned division, except that we have to worry about
4739 the case where the divisor is negative, in which case we have
4740 to allow for one extra bit. */
4741 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4742 known_x, known_mode, known_ret);
4743 if (result > 1
4744 && (bitwidth > HOST_BITS_PER_WIDE_INT
4745 || (nonzero_bits (XEXP (x, 1), mode)
4746 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4747 result--;
4748
4749 return result;
4750
4751 case MOD:
4752 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4753 known_x, known_mode, known_ret);
4754 if (result > 1
4755 && (bitwidth > HOST_BITS_PER_WIDE_INT
4756 || (nonzero_bits (XEXP (x, 1), mode)
4757 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4758 result--;
4759
4760 return result;
4761
4762 case ASHIFTRT:
4763 /* Shifts by a constant add to the number of bits equal to the
4764 sign bit. */
4765 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4766 known_x, known_mode, known_ret);
4767 if (CONST_INT_P (XEXP (x, 1))
4768 && INTVAL (XEXP (x, 1)) > 0
4769 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4770 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4771
4772 return num0;
4773
4774 case ASHIFT:
4775 /* Left shifts destroy copies. */
4776 if (!CONST_INT_P (XEXP (x, 1))
4777 || INTVAL (XEXP (x, 1)) < 0
4778 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4779 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4780 return 1;
4781
4782 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4783 known_x, known_mode, known_ret);
4784 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4785
4786 case IF_THEN_ELSE:
4787 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4788 known_x, known_mode, known_ret);
4789 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4790 known_x, known_mode, known_ret);
4791 return MIN (num0, num1);
4792
4793 case EQ: case NE: case GE: case GT: case LE: case LT:
4794 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4795 case GEU: case GTU: case LEU: case LTU:
4796 case UNORDERED: case ORDERED:
4797 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
4798 Then see how many zero bits we have. */
4799 nonzero = STORE_FLAG_VALUE;
4800 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4801 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4802 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4803
4804 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4805
4806 default:
4807 break;
4808 }
4809
4810 /* If we haven't been able to figure it out by one of the above rules,
4811 see if some of the high-order bits are known to be zero. If so,
4812 count those bits and return one less than that amount. If we can't
4813 safely compute the mask for this mode, always return BITWIDTH. */
4814
4815 bitwidth = GET_MODE_PRECISION (mode);
4816 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4817 return 1;
4818
4819 nonzero = nonzero_bits (x, mode);
4820 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4821 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4822 }
4823
4824 /* Calculate the rtx_cost of a single instruction. A return value of
4825 zero indicates an instruction pattern without a known cost. */
4826
4827 int
4828 insn_rtx_cost (rtx pat, bool speed)
4829 {
4830 int i, cost;
4831 rtx set;
4832
4833 /* Extract the single set rtx from the instruction pattern.
4834 We can't use single_set since we only have the pattern. */
4835 if (GET_CODE (pat) == SET)
4836 set = pat;
4837 else if (GET_CODE (pat) == PARALLEL)
4838 {
4839 set = NULL_RTX;
4840 for (i = 0; i < XVECLEN (pat, 0); i++)
4841 {
4842 rtx x = XVECEXP (pat, 0, i);
4843 if (GET_CODE (x) == SET)
4844 {
4845 if (set)
4846 return 0;
4847 set = x;
4848 }
4849 }
4850 if (!set)
4851 return 0;
4852 }
4853 else
4854 return 0;
4855
4856 cost = set_src_cost (SET_SRC (set), speed);
4857 return cost > 0 ? cost : COSTS_N_INSNS (1);
4858 }
4859
4860 /* Given an insn INSN and condition COND, return the condition in a
4861 canonical form to simplify testing by callers. Specifically:
4862
4863 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4864 (2) Both operands will be machine operands; (cc0) will have been replaced.
4865 (3) If an operand is a constant, it will be the second operand.
4866 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4867 for GE, GEU, and LEU.
4868
4869 If the condition cannot be understood, or is an inequality floating-point
4870 comparison which needs to be reversed, 0 will be returned.
4871
4872 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4873
4874 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4875 insn used in locating the condition was found. If a replacement test
4876 of the condition is desired, it should be placed in front of that
4877 insn and we will be sure that the inputs are still valid.
4878
4879 If WANT_REG is nonzero, we wish the condition to be relative to that
4880 register, if possible. Therefore, do not canonicalize the condition
4881 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4882 to be a compare to a CC mode register.
4883
4884 If VALID_AT_INSN_P, the condition must be valid both at *EARLIEST
4885 and at INSN. */
4886
4887 rtx
4888 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4889 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4890 {
4891 enum rtx_code code;
4892 rtx prev = insn;
4893 const_rtx set;
4894 rtx tem;
4895 rtx op0, op1;
4896 int reverse_code = 0;
4897 enum machine_mode mode;
4898 basic_block bb = BLOCK_FOR_INSN (insn);
4899
4900 code = GET_CODE (cond);
4901 mode = GET_MODE (cond);
4902 op0 = XEXP (cond, 0);
4903 op1 = XEXP (cond, 1);
4904
4905 if (reverse)
4906 code = reversed_comparison_code (cond, insn);
4907 if (code == UNKNOWN)
4908 return 0;
4909
4910 if (earliest)
4911 *earliest = insn;
4912
4913 /* If we are comparing a register with zero, see if the register is set
4914 in the previous insn to a COMPARE or a comparison operation. Perform
4915 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4916 in cse.c */
4917
4918 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4919 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4920 && op1 == CONST0_RTX (GET_MODE (op0))
4921 && op0 != want_reg)
4922 {
4923 /* Set nonzero when we find something of interest. */
4924 rtx x = 0;
4925
4926 #ifdef HAVE_cc0
4927 /* If comparison with cc0, import actual comparison from compare
4928 insn. */
4929 if (op0 == cc0_rtx)
4930 {
4931 if ((prev = prev_nonnote_insn (prev)) == 0
4932 || !NONJUMP_INSN_P (prev)
4933 || (set = single_set (prev)) == 0
4934 || SET_DEST (set) != cc0_rtx)
4935 return 0;
4936
4937 op0 = SET_SRC (set);
4938 op1 = CONST0_RTX (GET_MODE (op0));
4939 if (earliest)
4940 *earliest = prev;
4941 }
4942 #endif
4943
4944 /* If this is a COMPARE, pick up the two things being compared. */
4945 if (GET_CODE (op0) == COMPARE)
4946 {
4947 op1 = XEXP (op0, 1);
4948 op0 = XEXP (op0, 0);
4949 continue;
4950 }
4951 else if (!REG_P (op0))
4952 break;
4953
4954 /* Go back to the previous insn. Stop if it is not an INSN. We also
4955 stop if it isn't a single set or if it has a REG_INC note because
4956 we don't want to bother dealing with it. */
4957
4958 prev = prev_nonnote_nondebug_insn (prev);
4959
4960 if (prev == 0
4961 || !NONJUMP_INSN_P (prev)
4962 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4963 /* In cfglayout mode, there do not have to be labels at the
4964 beginning of a block, or jumps at the end, so the previous
4965 conditions would not stop us when we reach bb boundary. */
4966 || BLOCK_FOR_INSN (prev) != bb)
4967 break;
4968
4969 set = set_of (op0, prev);
4970
4971 if (set
4972 && (GET_CODE (set) != SET
4973 || !rtx_equal_p (SET_DEST (set), op0)))
4974 break;
4975
4976 /* If this is setting OP0, get what it sets it to if it looks
4977 relevant. */
4978 if (set)
4979 {
4980 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4981 #ifdef FLOAT_STORE_FLAG_VALUE
4982 REAL_VALUE_TYPE fsfv;
4983 #endif
4984
4985 /* ??? We may not combine comparisons done in a CCmode with
4986 comparisons not done in a CCmode. This is to aid targets
4987 like Alpha that have an IEEE compliant EQ instruction, and
4988 a non-IEEE compliant BEQ instruction. The use of CCmode is
4989 actually artificial, simply to prevent the combination, but
4990 should not affect other platforms.
4991
4992 However, we must allow VOIDmode comparisons to match either
4993 CCmode or non-CCmode comparison, because some ports have
4994 modeless comparisons inside branch patterns.
4995
4996 ??? This mode check should perhaps look more like the mode check
4997 in simplify_comparison in combine. */
4998
4999 if ((GET_CODE (SET_SRC (set)) == COMPARE
5000 || (((code == NE
5001 || (code == LT
5002 && val_signbit_known_set_p (inner_mode,
5003 STORE_FLAG_VALUE))
5004 #ifdef FLOAT_STORE_FLAG_VALUE
5005 || (code == LT
5006 && SCALAR_FLOAT_MODE_P (inner_mode)
5007 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5008 REAL_VALUE_NEGATIVE (fsfv)))
5009 #endif
5010 ))
5011 && COMPARISON_P (SET_SRC (set))))
5012 && (((GET_MODE_CLASS (mode) == MODE_CC)
5013 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5014 || mode == VOIDmode || inner_mode == VOIDmode))
5015 x = SET_SRC (set);
5016 else if (((code == EQ
5017 || (code == GE
5018 && val_signbit_known_set_p (inner_mode,
5019 STORE_FLAG_VALUE))
5020 #ifdef FLOAT_STORE_FLAG_VALUE
5021 || (code == GE
5022 && SCALAR_FLOAT_MODE_P (inner_mode)
5023 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5024 REAL_VALUE_NEGATIVE (fsfv)))
5025 #endif
5026 ))
5027 && COMPARISON_P (SET_SRC (set))
5028 && (((GET_MODE_CLASS (mode) == MODE_CC)
5029 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5030 || mode == VOIDmode || inner_mode == VOIDmode))
5031
5032 {
5033 reverse_code = 1;
5034 x = SET_SRC (set);
5035 }
5036 else
5037 break;
5038 }
5039
5040 else if (reg_set_p (op0, prev))
5041 /* If this sets OP0, but not directly, we have to give up. */
5042 break;
5043
5044 if (x)
5045 {
5046 /* If the caller is expecting the condition to be valid at INSN,
5047 make sure X doesn't change before INSN. */
5048 if (valid_at_insn_p)
5049 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5050 break;
5051 if (COMPARISON_P (x))
5052 code = GET_CODE (x);
5053 if (reverse_code)
5054 {
5055 code = reversed_comparison_code (x, prev);
5056 if (code == UNKNOWN)
5057 return 0;
5058 reverse_code = 0;
5059 }
5060
5061 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5062 if (earliest)
5063 *earliest = prev;
5064 }
5065 }
5066
5067 /* If constant is first, put it last. */
5068 if (CONSTANT_P (op0))
5069 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5070
5071 /* If OP0 is the result of a comparison, we weren't able to find what
5072 was really being compared, so fail. */
5073 if (!allow_cc_mode
5074 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5075 return 0;
5076
5077 /* Canonicalize any ordered comparison with integers involving equality
5078 if we can do computations in the relevant mode and we do not
5079 overflow. */
5080
5081 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5082 && CONST_INT_P (op1)
5083 && GET_MODE (op0) != VOIDmode
5084 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5085 {
5086 HOST_WIDE_INT const_val = INTVAL (op1);
5087 unsigned HOST_WIDE_INT uconst_val = const_val;
5088 unsigned HOST_WIDE_INT max_val
5089 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5090
5091 switch (code)
5092 {
5093 case LE:
5094 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5095 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5096 break;
5097
5098 /* When cross-compiling, const_val might be sign-extended from
5099 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5100 case GE:
5101 if ((const_val & max_val)
5102 != ((unsigned HOST_WIDE_INT) 1
5103 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5104 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5105 break;
5106
5107 case LEU:
5108 if (uconst_val < max_val)
5109 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5110 break;
5111
5112 case GEU:
5113 if (uconst_val != 0)
5114 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5115 break;
5116
5117 default:
5118 break;
5119 }
5120 }
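/* For example, in SImode (le x 4) becomes (lt x 5) and (geu x 7)
becomes (gtu x 6), while (le x 0x7fffffff) is left alone because
const_val + 1 would overflow the mode. */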
5121
5122 /* Never return CC0; return zero instead. */
5123 if (CC0_P (op0))
5124 return 0;
5125
5126 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5127 }
5128
5129 /* Given a jump insn JUMP, return the condition that will cause it to branch
5130 to its JUMP_LABEL. If the condition cannot be understood, or is an
5131 inequality floating-point comparison which needs to be reversed, 0 will
5132 be returned.
5133
5134 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5135 insn used in locating the condition was found. If a replacement test
5136 of the condition is desired, it should be placed in front of that
5137 insn and we will be sure that the inputs are still valid. If EARLIEST
5138 is null, the returned condition will be valid at INSN.
5139
5140 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5141 compare CC mode register.
5142
5143 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5144
5145 rtx
5146 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5147 {
5148 rtx cond;
5149 int reverse;
5150 rtx set;
5151
5152 /* If this is not a standard conditional jump, we can't parse it. */
5153 if (!JUMP_P (jump)
5154 || ! any_condjump_p (jump))
5155 return 0;
5156 set = pc_set (jump);
5157
5158 cond = XEXP (SET_SRC (set), 0);
5159
5160 /* If this branches to JUMP_LABEL when the condition is false, reverse
5161 the condition. */
5162 reverse
5163 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5164 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5165
5166 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5167 allow_cc_mode, valid_at_insn_p);
5168 }
5169
5170 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5171 TARGET_MODE_REP_EXTENDED.
5172
5173 Note that we assume that the property of
5174 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5175 narrower than mode B. I.e., if A is a mode narrower than B then in
5176 order to be able to operate on it in mode B, mode A needs to
5177 satisfy the requirements set by the representation of mode B. */
5178
5179 static void
5180 init_num_sign_bit_copies_in_rep (void)
5181 {
5182 enum machine_mode mode, in_mode;
5183
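/* Note that when the inner loop exits, MODE equals IN_MODE, so
stepping the outer loop with GET_MODE_WIDER_MODE (mode) advances
IN_MODE to the next wider integer mode. */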
5184 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5185 in_mode = GET_MODE_WIDER_MODE (mode))
5186 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5187 mode = GET_MODE_WIDER_MODE (mode))
5188 {
5189 enum machine_mode i;
5190
5191 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5192 extends to the next widest mode. */
5193 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5194 || GET_MODE_WIDER_MODE (mode) == in_mode);
5195
5196 /* We are in in_mode. Count how many bits outside of mode
5197 have to be copies of the sign-bit. */
5198 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5199 {
5200 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5201
5202 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5203 /* We can only check sign-bit copies starting from the
5204 top-bit. In order to be able to check the bits we
5205 have already seen we pretend that subsequent bits
5206 have to be sign-bit copies too. */
5207 || num_sign_bit_copies_in_rep [in_mode][mode])
5208 num_sign_bit_copies_in_rep [in_mode][mode]
5209 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5210 }
5211 }
5212 }
5213
5214 /* Suppose that truncation from the machine mode of X to MODE is not a
5215 no-op. See if there is anything special about X so that we can
5216 assume it already contains a truncated value of MODE. */
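/* For example, on a hypothetical target where
TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, a DImode
value already truncates to SImode whenever it has at least
32 + 1 sign bit copies. */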
5217
5218 bool
5219 truncated_to_mode (enum machine_mode mode, const_rtx x)
5220 {
5221 /* This register has already been used in MODE without explicit
5222 truncation. */
5223 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5224 return true;
5225
5226 /* See if we already satisfy the requirements of MODE. If yes we
5227 can just switch to MODE. */
5228 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5229 && (num_sign_bit_copies (x, GET_MODE (x))
5230 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5231 return true;
5232
5233 return false;
5234 }
5235 \f
5236 /* Initialize non_rtx_starting_operands, which is used to speed up
5237 for_each_rtx. */
5238 void
5239 init_rtlanal (void)
5240 {
5241 int i;
5242 for (i = 0; i < NUM_RTX_CODE; i++)
5243 {
5244 const char *format = GET_RTX_FORMAT (i);
5245 const char *first = strpbrk (format, "eEV");
5246 non_rtx_starting_operands[i] = first ? first - format : -1;
5247 }
5248
5249 init_num_sign_bit_copies_in_rep ();
5250 }
5251 \f
5252 /* Check whether this is a constant pool constant. */
5253 bool
5254 constant_pool_constant_p (rtx x)
5255 {
5256 x = avoid_constant_pool_reference (x);
5257 return CONST_DOUBLE_P (x);
5258 }
5259 \f
5260 /* If M is a bitmask that selects a field of low-order bits within an item but
5261 not the entire word, return the length of the field. Return -1 otherwise.
5262 M is used in machine mode MODE. */
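/* For example, m == 0x3f gives 6 because 0x3f + 1 is a power of 2,
while m == 0x36 gives -1 because the mask has a hole in it. */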
5263
5264 int
5265 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5266 {
5267 if (mode != VOIDmode)
5268 {
5269 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5270 return -1;
5271 m &= GET_MODE_MASK (mode);
5272 }
5273
5274 return exact_log2 (m + 1);
5275 }
5276
5277 /* Return the mode of MEM's address. */
5278
5279 enum machine_mode
5280 get_address_mode (rtx mem)
5281 {
5282 enum machine_mode mode;
5283
5284 gcc_assert (MEM_P (mem));
5285 mode = GET_MODE (XEXP (mem, 0));
5286 if (mode != VOIDmode)
5287 return mode;
5288 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5289 }
5290 \f
5291 /* Split up a CONST_DOUBLE or integer constant rtx
5292 into two rtx's for single words,
5293 storing in *FIRST the word that comes first in memory in the target
5294 and in *SECOND the other. */
5295
5296 void
5297 split_double (rtx value, rtx *first, rtx *second)
5298 {
5299 if (CONST_INT_P (value))
5300 {
5301 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5302 {
5303 /* In this case the CONST_INT holds both target words.
5304 Extract the bits from it into two word-sized pieces.
5305 Sign extend each half to HOST_WIDE_INT. */
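/* For example, with a 64-bit HOST_WIDE_INT and BITS_PER_WORD == 32,
the value 0x1ffffffff splits into low == -1 (0xffffffff
sign-extended) and high == 1. */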
5306 unsigned HOST_WIDE_INT low, high;
5307 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5308 unsigned bits_per_word = BITS_PER_WORD;
5309
5310 /* Set sign_bit to the most significant bit of a word. */
5311 sign_bit = 1;
5312 sign_bit <<= bits_per_word - 1;
5313
5314 /* Set mask so that all bits of the word are set. We could
5315 have used 1 << BITS_PER_WORD instead of basing the
5316 calculation on sign_bit. However, on machines where
5317 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5318 compiler warning, even though the code would never be
5319 executed. */
5320 mask = sign_bit << 1;
5321 mask--;
5322
5323 /* Set sign_extend as any remaining bits. */
5324 sign_extend = ~mask;
5325
5326 /* Pick the lower word and sign-extend it. */
5327 low = INTVAL (value);
5328 low &= mask;
5329 if (low & sign_bit)
5330 low |= sign_extend;
5331
5332 /* Pick the higher word, shifted to the least significant
5333 bits, and sign-extend it. */
5334 high = INTVAL (value);
5335 high >>= bits_per_word - 1;
5336 high >>= 1;
5337 high &= mask;
5338 if (high & sign_bit)
5339 high |= sign_extend;
5340
5341 /* Store the words in the target machine order. */
5342 if (WORDS_BIG_ENDIAN)
5343 {
5344 *first = GEN_INT (high);
5345 *second = GEN_INT (low);
5346 }
5347 else
5348 {
5349 *first = GEN_INT (low);
5350 *second = GEN_INT (high);
5351 }
5352 }
5353 else
5354 {
5355 /* The rule for using CONST_INT for a wider mode
5356 is that we regard the value as signed.
5357 So sign-extend it. */
5358 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5359 if (WORDS_BIG_ENDIAN)
5360 {
5361 *first = high;
5362 *second = value;
5363 }
5364 else
5365 {
5366 *first = value;
5367 *second = high;
5368 }
5369 }
5370 }
5371 else if (!CONST_DOUBLE_P (value))
5372 {
5373 if (WORDS_BIG_ENDIAN)
5374 {
5375 *first = const0_rtx;
5376 *second = value;
5377 }
5378 else
5379 {
5380 *first = value;
5381 *second = const0_rtx;
5382 }
5383 }
5384 else if (GET_MODE (value) == VOIDmode
5385 /* This is the old way we did CONST_DOUBLE integers. */
5386 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5387 {
5388 /* In an integer, the words are defined as most and least significant.
5389 So order them by the target's convention. */
5390 if (WORDS_BIG_ENDIAN)
5391 {
5392 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5393 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5394 }
5395 else
5396 {
5397 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5398 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5399 }
5400 }
5401 else
5402 {
5403 REAL_VALUE_TYPE r;
5404 long l[2];
5405 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5406
5407 /* Note, this converts the REAL_VALUE_TYPE to the target's
5408 format, splits up the floating point double and outputs
5409 exactly 32 bits of it into each of l[0] and l[1] --
5410 not necessarily BITS_PER_WORD bits. */
5411 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5412
5413 /* If 32 bits is an entire word for the target, but not for the host,
5414 then sign-extend on the host so that the number will look the same
5415 way on the host that it would on the target. See for instance
5416 simplify_unary_operation. The #if is needed to avoid compiler
5417 warnings. */
5418
5419 #if HOST_BITS_PER_LONG > 32
5420 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5421 {
5422 if (l[0] & ((long) 1 << 31))
5423 l[0] |= ((long) (-1) << 32);
5424 if (l[1] & ((long) 1 << 31))
5425 l[1] |= ((long) (-1) << 32);
5426 }
5427 #endif
5428
5429 *first = GEN_INT (l[0]);
5430 *second = GEN_INT (l[1]);
5431 }
5432 }
5433
5434 /* Strip outer address "mutations" from LOC and return a pointer to the
5435 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5436 stripped expression there.
5437
5438 "Mutations" either convert between modes or apply some kind of
5439 alignment. */
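/* For example, given (and:SI (subreg:SI (plus:DI ...) 0)
(const_int -4)), both the alignment AND and the lowpart SUBREG are
stripped and a pointer to the PLUS is returned. */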
5440
5441 rtx *
5442 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5443 {
5444 for (;;)
5445 {
5446 enum rtx_code code = GET_CODE (*loc);
5447 if (GET_RTX_CLASS (code) == RTX_UNARY)
5448 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5449 used to convert between pointer sizes. */
5450 loc = &XEXP (*loc, 0);
5451 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5452 /* (and ... (const_int -X)) is used to align to X bytes. */
5453 loc = &XEXP (*loc, 0);
5454 else if (code == SUBREG
5455 && !OBJECT_P (SUBREG_REG (*loc))
5456 && subreg_lowpart_p (*loc))
5457 /* (subreg (operator ...) ...) inside an AND is used for mode
5458 conversion too. */
5459 loc = &SUBREG_REG (*loc);
5460 else
5461 return loc;
5462 if (outer_code)
5463 *outer_code = code;
5464 }
5465 }
5466
5467 /* Return true if X must be a base rather than an index. */
5468
5469 static bool
5470 must_be_base_p (rtx x)
5471 {
5472 return GET_CODE (x) == LO_SUM;
5473 }
5474
5475 /* Return true if X must be an index rather than a base. */
5476
5477 static bool
5478 must_be_index_p (rtx x)
5479 {
5480 return GET_CODE (x) == MULT || GET_CODE (x) == ASHIFT;
5481 }
5482
5483 /* Set the segment part of address INFO to LOC, given that INNER is the
5484 unmutated value. */
5485
5486 static void
5487 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5488 {
5489 gcc_checking_assert (GET_CODE (*inner) == UNSPEC);
5490
5491 gcc_assert (!info->segment);
5492 info->segment = loc;
5493 info->segment_term = inner;
5494 }
5495
5496 /* Set the base part of address INFO to LOC, given that INNER is the
5497 unmutated value. */
5498
5499 static void
5500 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5501 {
5502 if (GET_CODE (*inner) == LO_SUM)
5503 inner = strip_address_mutations (&XEXP (*inner, 0));
5504 gcc_checking_assert (REG_P (*inner)
5505 || MEM_P (*inner)
5506 || GET_CODE (*inner) == SUBREG);
5507
5508 gcc_assert (!info->base);
5509 info->base = loc;
5510 info->base_term = inner;
5511 }
5512
5513 /* Set the index part of address INFO to LOC, given that INNER is the
5514 unmutated value. */
5515
5516 static void
5517 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5518 {
5519 if ((GET_CODE (*inner) == MULT || GET_CODE (*inner) == ASHIFT)
5520 && CONSTANT_P (XEXP (*inner, 1)))
5521 inner = strip_address_mutations (&XEXP (*inner, 0));
5522 gcc_checking_assert (REG_P (*inner)
5523 || MEM_P (*inner)
5524 || GET_CODE (*inner) == SUBREG);
5525
5526 gcc_assert (!info->index);
5527 info->index = loc;
5528 info->index_term = inner;
5529 }
5530
5531 /* Set the displacement part of address INFO to LOC, given that INNER
5532 is the constant term. */
5533
5534 static void
5535 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5536 {
5537 gcc_checking_assert (CONSTANT_P (*inner));
5538
5539 gcc_assert (!info->disp);
5540 info->disp = loc;
5541 info->disp_term = inner;
5542 }
5543
5544 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5545 rest of INFO accordingly. */
5546
5547 static void
5548 decompose_incdec_address (struct address_info *info)
5549 {
5550 info->autoinc_p = true;
5551
5552 rtx *base = &XEXP (*info->inner, 0);
5553 set_address_base (info, base, base);
5554 gcc_checking_assert (info->base == info->base_term);
5555
5556 /* These addresses are only valid when the size of the addressed
5557 value is known. */
5558 gcc_checking_assert (info->mode != VOIDmode);
5559 }
5560
5561 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5562 of INFO accordingly. */
5563
5564 static void
5565 decompose_automod_address (struct address_info *info)
5566 {
5567 info->autoinc_p = true;
5568
5569 rtx *base = &XEXP (*info->inner, 0);
5570 set_address_base (info, base, base);
5571 gcc_checking_assert (info->base == info->base_term);
5572
5573 rtx plus = XEXP (*info->inner, 1);
5574 gcc_assert (GET_CODE (plus) == PLUS);
5575
5576 info->base_term2 = &XEXP (plus, 0);
5577 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5578
5579 rtx *step = &XEXP (plus, 1);
5580 rtx *inner_step = strip_address_mutations (step);
5581 if (CONSTANT_P (*inner_step))
5582 set_address_disp (info, step, inner_step);
5583 else
5584 set_address_index (info, step, inner_step);
5585 }
5586
5587 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5588 values in [PTR, END). Return a pointer to the end of the used array. */
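/* For example, (plus (plus reg1 reg2) (const_int 4)) fills the array
with pointers to reg1, reg2 and the const_int, in that order. */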
5589
5590 static rtx **
5591 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5592 {
5593 rtx x = *loc;
5594 if (GET_CODE (x) == PLUS)
5595 {
5596 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5597 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5598 }
5599 else
5600 {
5601 gcc_assert (ptr != end);
5602 *ptr++ = loc;
5603 }
5604 return ptr;
5605 }
5606
5607 /* Evaluate the likelihood of X being a base or index value, returning
5608 positive if it is likely to be a base, negative if it is likely to be
5609 an index, and 0 if we can't tell. Make the magnitude of the return
5610 value reflect the amount of confidence we have in the answer.
5611
5612 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5613
5614 static int
5615 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5616 enum rtx_code outer_code, enum rtx_code index_code)
5617 {
5618 /* See whether we can be certain. */
5619 if (must_be_base_p (x))
5620 return 3;
5621 if (must_be_index_p (x))
5622 return -3;
5623
5624 /* Believe *_POINTER unless the address shape requires otherwise. */
5625 if (REG_P (x) && REG_POINTER (x))
5626 return 2;
5627 if (MEM_P (x) && MEM_POINTER (x))
5628 return 2;
5629
5630 if (REG_P (x) && HARD_REGISTER_P (x))
5631 {
5632 /* X is a hard register. If it only fits one of the base
5633 or index classes, choose that interpretation. */
5634 int regno = REGNO (x);
5635 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5636 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5637 if (base_p != index_p)
5638 return base_p ? 1 : -1;
5639 }
5640 return 0;
5641 }
5642
5643 /* INFO->INNER describes a normal, non-automodified address.
5644 Fill in the rest of INFO accordingly. */
5645
5646 static void
5647 decompose_normal_address (struct address_info *info)
5648 {
5649 /* Treat the address as the sum of up to four values. */
5650 rtx *ops[4];
5651 size_t n_ops = extract_plus_operands (info->inner, ops,
5652 ops + ARRAY_SIZE (ops)) - ops;
5653
5654 /* If there is more than one component, any base component is in a PLUS. */
5655 if (n_ops > 1)
5656 info->base_outer_code = PLUS;
5657
5658 /* Separate the parts that contain a REG or MEM from those that don't.
5659 Record the latter in INFO and leave the former in OPS. */
5660 rtx *inner_ops[4];
5661 size_t out = 0;
5662 for (size_t in = 0; in < n_ops; ++in)
5663 {
5664 rtx *loc = ops[in];
5665 rtx *inner = strip_address_mutations (loc);
5666 if (CONSTANT_P (*inner))
5667 set_address_disp (info, loc, inner);
5668 else if (GET_CODE (*inner) == UNSPEC)
5669 set_address_segment (info, loc, inner);
5670 else
5671 {
5672 ops[out] = loc;
5673 inner_ops[out] = inner;
5674 ++out;
5675 }
5676 }
5677
5678 /* Classify the remaining OPS members as bases and indexes. */
5679 if (out == 1)
5680 {
5681 /* Assume that the remaining value is a base unless the shape
5682 requires otherwise. */
5683 if (!must_be_index_p (*inner_ops[0]))
5684 set_address_base (info, ops[0], inner_ops[0]);
5685 else
5686 set_address_index (info, ops[0], inner_ops[0]);
5687 }
5688 else if (out == 2)
5689 {
5690 /* In the event of a tie, assume the base comes first. */
5691 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5692 GET_CODE (*ops[1]))
5693 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5694 GET_CODE (*ops[0])))
5695 {
5696 set_address_base (info, ops[0], inner_ops[0]);
5697 set_address_index (info, ops[1], inner_ops[1]);
5698 }
5699 else
5700 {
5701 set_address_base (info, ops[1], inner_ops[1]);
5702 set_address_index (info, ops[0], inner_ops[0]);
5703 }
5704 }
5705 else
5706 gcc_assert (out == 0);
5707 }
5708
5709 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5710 or VOIDmode if not known. AS is the address space associated with LOC.
5711 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
5712
5713 void
5714 decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
5715 addr_space_t as, enum rtx_code outer_code)
5716 {
5717 memset (info, 0, sizeof (*info));
5718 info->mode = mode;
5719 info->as = as;
5720 info->addr_outer_code = outer_code;
5721 info->outer = loc;
5722 info->inner = strip_address_mutations (loc, &outer_code);
5723 info->base_outer_code = outer_code;
5724 switch (GET_CODE (*info->inner))
5725 {
5726 case PRE_DEC:
5727 case PRE_INC:
5728 case POST_DEC:
5729 case POST_INC:
5730 decompose_incdec_address (info);
5731 break;
5732
5733 case PRE_MODIFY:
5734 case POST_MODIFY:
5735 decompose_automod_address (info);
5736 break;
5737
5738 default:
5739 decompose_normal_address (info);
5740 break;
5741 }
5742 }
5743
5744 /* Describe address operand LOC in INFO. */
5745
5746 void
5747 decompose_lea_address (struct address_info *info, rtx *loc)
5748 {
5749 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
5750 }
5751
5752 /* Describe the address of MEM X in INFO. */
5753
5754 void
5755 decompose_mem_address (struct address_info *info, rtx x)
5756 {
5757 gcc_assert (MEM_P (x));
5758 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
5759 MEM_ADDR_SPACE (x), MEM);
5760 }
5761
5762 /* Update INFO after a change to the address it describes. */
5763
5764 void
5765 update_address (struct address_info *info)
5766 {
5767 decompose_address (info, info->outer, info->mode, info->as,
5768 info->addr_outer_code);
5769 }
5770
5771 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
5772 more complicated than that. */
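/* For example, (mult reg (const_int 4)) and (ashift reg (const_int 2))
both give a scale of 4, and a bare reg gives 1. */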
5773
5774 HOST_WIDE_INT
5775 get_index_scale (const struct address_info *info)
5776 {
5777 rtx index = *info->index;
5778 if (GET_CODE (index) == MULT
5779 && CONST_INT_P (XEXP (index, 1))
5780 && info->index_term == &XEXP (index, 0))
5781 return INTVAL (XEXP (index, 1));
5782
5783 if (GET_CODE (index) == ASHIFT
5784 && CONST_INT_P (XEXP (index, 1))
5785 && info->index_term == &XEXP (index, 0))
5786 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
5787
5788 if (info->index == info->index_term)
5789 return 1;
5790
5791 return 0;
5792 }
5793
5794 /* Return the "index code" of INFO, in the form required by
5795 ok_for_base_p_1. */
5796
5797 enum rtx_code
5798 get_index_code (const struct address_info *info)
5799 {
5800 if (info->index)
5801 return GET_CODE (*info->index);
5802
5803 if (info->disp)
5804 return GET_CODE (*info->disp);
5805
5806 return SCRATCH;
5807 }