gcc/rtlanal.c
1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "insn-config.h"
29 #include "recog.h"
30 #include "target.h"
31 #include "output.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "df.h"
37 #include "tree.h"
38 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
39 #include "addresses.h"
40
41 /* Forward declarations */
42 static void set_of_1 (rtx, const_rtx, void *);
43 static bool covers_regno_p (const_rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
45 static int rtx_referenced_p_1 (rtx *, void *);
46 static int computed_jump_p_1 (const_rtx);
47 static void parms_set (rtx, const_rtx, void *);
48
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
50 const_rtx, enum machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
53 const_rtx, enum machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
56 enum machine_mode,
57 unsigned int);
58 static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
59 enum machine_mode, unsigned int);
60
61 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
62 -1 if a code has no such operand. */
63 static int non_rtx_starting_operands[NUM_RTX_CODE];
64
65 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
66 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
67 SIGN_EXTEND then while narrowing we also have to enforce the
68 representation and sign-extend the value to mode DESTINATION_REP.
69
70 If the value is already sign-extended to DESTINATION_REP mode we
71 can just switch to DESTINATION mode on it. For each pair of
72 integral modes SOURCE and DESTINATION, when truncating from SOURCE
73 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
74 contains the number of high-order bits in SOURCE that have to be
75 copies of the sign-bit so that we can do this mode-switch to
76 DESTINATION. */
77
78 static unsigned int
79 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
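
/* A worked example (a sketch; the modes involved depend on the target):
   if TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, a DImode
   value can be switched to SImode without an explicit truncation only if
   its top 64 - 32 + 1 = 33 bits are copies of the sign bit, i.e. only if
   the value is already sign-extended from SImode; 33 is the count that
   would be cached here for that mode pair.  */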
80 \f
81 /* Return 1 if the value of X is unstable
82 (would be different at a different point in the program).
83 The frame pointer, arg pointer, etc. are considered stable
84 (within one function) and so is anything marked `unchanging'. */
85
86 int
87 rtx_unstable_p (const_rtx x)
88 {
89 const RTX_CODE code = GET_CODE (x);
90 int i;
91 const char *fmt;
92
93 switch (code)
94 {
95 case MEM:
96 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
97
98 case CONST:
99 CASE_CONST_ANY:
100 case SYMBOL_REF:
101 case LABEL_REF:
102 return 0;
103
104 case REG:
105 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
106 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
107 /* The arg pointer varies if it is not a fixed register. */
108 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
109 return 0;
110 /* ??? When call-clobbered, the value is stable modulo the restore
111 that must happen after a call. This currently fools local-alloc
112 into believing that the restore is not needed. */
113 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
114 return 0;
115 return 1;
116
117 case ASM_OPERANDS:
118 if (MEM_VOLATILE_P (x))
119 return 1;
120
121 /* Fall through. */
122
123 default:
124 break;
125 }
126
127 fmt = GET_RTX_FORMAT (code);
128 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
129 if (fmt[i] == 'e')
130 {
131 if (rtx_unstable_p (XEXP (x, i)))
132 return 1;
133 }
134 else if (fmt[i] == 'E')
135 {
136 int j;
137 for (j = 0; j < XVECLEN (x, i); j++)
138 if (rtx_unstable_p (XVECEXP (x, i, j)))
139 return 1;
140 }
141
142 return 0;
143 }
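
/* For example, (mem:SI (reg:SI 100)) is unstable unless the MEM is
   MEM_READONLY_P, while frame_pointer_rtx and anything built only from
   stable pieces, such as (plus:SI frame_pointer_rtx (const_int 8)), is
   stable within one function.  (Illustrative RTL; modes and register
   numbers are target-dependent.)  */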
144
145 /* Return 1 if X has a value that can vary even between two
146 executions of the program. 0 means X can be compared reliably
147 against certain constants or near-constants.
148 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
149 zero, we are slightly more conservative.
150 The frame pointer and the arg pointer are considered constant. */
151
152 bool
153 rtx_varies_p (const_rtx x, bool for_alias)
154 {
155 RTX_CODE code;
156 int i;
157 const char *fmt;
158
159 if (!x)
160 return 0;
161
162 code = GET_CODE (x);
163 switch (code)
164 {
165 case MEM:
166 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
167
168 case CONST:
169 CASE_CONST_ANY:
170 case SYMBOL_REF:
171 case LABEL_REF:
172 return 0;
173
174 case REG:
175 /* Note that we have to test for the actual rtx used for the frame
176 and arg pointers and not just the register number in case we have
177 eliminated the frame and/or arg pointer and are using it
178 for pseudos. */
179 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
180 /* The arg pointer varies if it is not a fixed register. */
181 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
182 return 0;
183 if (x == pic_offset_table_rtx
184 /* ??? When call-clobbered, the value is stable modulo the restore
185 that must happen after a call. This currently fools
186 local-alloc into believing that the restore is not needed, so we
187 must return 0 only if we are called from alias analysis. */
188 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
189 return 0;
190 return 1;
191
192 case LO_SUM:
193 /* The operand 0 of a LO_SUM is considered constant
194 (in fact it is related specifically to operand 1)
195 during alias analysis. */
196 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
197 || rtx_varies_p (XEXP (x, 1), for_alias);
198
199 case ASM_OPERANDS:
200 if (MEM_VOLATILE_P (x))
201 return 1;
202
203 /* Fall through. */
204
205 default:
206 break;
207 }
208
209 fmt = GET_RTX_FORMAT (code);
210 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
211 if (fmt[i] == 'e')
212 {
213 if (rtx_varies_p (XEXP (x, i), for_alias))
214 return 1;
215 }
216 else if (fmt[i] == 'E')
217 {
218 int j;
219 for (j = 0; j < XVECLEN (x, i); j++)
220 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
221 return 1;
222 }
223
224 return 0;
225 }
226
227 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
228 OFFSET and SIZE give the byte range accessed (SIZE 0 means the mode size).
229 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
230 whether nonzero is returned for unaligned accesses on strict alignment machines. */
231
232 static int
233 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
234 enum machine_mode mode, bool unaligned_mems)
235 {
236 enum rtx_code code = GET_CODE (x);
237
238 if (STRICT_ALIGNMENT
239 && unaligned_mems
240 && GET_MODE_SIZE (mode) != 0)
241 {
242 HOST_WIDE_INT actual_offset = offset;
243 #ifdef SPARC_STACK_BOUNDARY_HACK
244 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
245 the real alignment of %sp. However, when it does this, the
246 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
247 if (SPARC_STACK_BOUNDARY_HACK
248 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
249 actual_offset -= STACK_POINTER_OFFSET;
250 #endif
251
252 if (actual_offset % GET_MODE_SIZE (mode) != 0)
253 return 1;
254 }
255
256 switch (code)
257 {
258 case SYMBOL_REF:
259 if (SYMBOL_REF_WEAK (x))
260 return 1;
261 if (!CONSTANT_POOL_ADDRESS_P (x))
262 {
263 tree decl;
264 HOST_WIDE_INT decl_size;
265
266 if (offset < 0)
267 return 1;
268 if (size == 0)
269 size = GET_MODE_SIZE (mode);
270 if (size == 0)
271 return offset != 0;
272
273 /* If the size of the access or of the symbol is unknown,
274 assume the worst. */
275 decl = SYMBOL_REF_DECL (x);
276
277 /* Else check that the access is in bounds. TODO: restructure
278 expr_size/tree_expr_size/int_expr_size and just use the latter. */
279 if (!decl)
280 decl_size = -1;
281 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
282 decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
283 ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
284 : -1);
285 else if (TREE_CODE (decl) == STRING_CST)
286 decl_size = TREE_STRING_LENGTH (decl);
287 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
288 decl_size = int_size_in_bytes (TREE_TYPE (decl));
289 else
290 decl_size = -1;
291
292 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
293 }
294
295 return 0;
296
297 case LABEL_REF:
298 return 0;
299
300 case REG:
301 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
302 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
303 || x == stack_pointer_rtx
304 /* The arg pointer varies if it is not a fixed register. */
305 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
306 return 0;
307 /* All of the virtual frame registers are stack references. */
308 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
309 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
310 return 0;
311 return 1;
312
313 case CONST:
314 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
315 mode, unaligned_mems);
316
317 case PLUS:
318 /* An address is assumed not to trap if:
319 - it is the pic register plus a constant. */
320 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
321 return 0;
322
323 /* - or it is an address that can't trap plus a constant integer,
324 with the proper remainder modulo the mode size if we are
325 considering unaligned memory references. */
326 if (CONST_INT_P (XEXP (x, 1))
327 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
328 size, mode, unaligned_mems))
329 return 0;
330
331 return 1;
332
333 case LO_SUM:
334 case PRE_MODIFY:
335 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
336 mode, unaligned_mems);
337
338 case PRE_DEC:
339 case PRE_INC:
340 case POST_DEC:
341 case POST_INC:
342 case POST_MODIFY:
343 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
344 mode, unaligned_mems);
345
346 default:
347 break;
348 }
349
350 /* If it isn't one of the cases above, it can cause a trap. */
351 return 1;
352 }
353
354 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
355
356 int
357 rtx_addr_can_trap_p (const_rtx x)
358 {
359 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
360 }
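
/* For example, (plus:SI (reg:SI STACK_POINTER_REGNUM) (const_int 8))
   cannot trap, since the stack pointer plus a constant stays within the
   frame, whereas an address based on an arbitrary pseudo register or on
   a SYMBOL_REF_WEAK symbol is assumed to be able to trap.  (Illustrative;
   mode and alignment details are target-dependent.)  */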
361
362 /* Return true if X is an address that is known to not be zero. */
363
364 bool
365 nonzero_address_p (const_rtx x)
366 {
367 const enum rtx_code code = GET_CODE (x);
368
369 switch (code)
370 {
371 case SYMBOL_REF:
372 return !SYMBOL_REF_WEAK (x);
373
374 case LABEL_REF:
375 return true;
376
377 case REG:
378 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
379 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
380 || x == stack_pointer_rtx
381 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
382 return true;
383 /* All of the virtual frame registers are stack references. */
384 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
385 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
386 return true;
387 return false;
388
389 case CONST:
390 return nonzero_address_p (XEXP (x, 0));
391
392 case PLUS:
393 /* Handle PIC references. */
394 if (XEXP (x, 0) == pic_offset_table_rtx
395 && CONSTANT_P (XEXP (x, 1)))
396 return true;
397 return false;
398
399 case PRE_MODIFY:
400 /* Similar to the above; allow positive offsets. Further, since
401 auto-inc is only allowed in memories, the register must be a
402 pointer. */
403 if (CONST_INT_P (XEXP (x, 1))
404 && INTVAL (XEXP (x, 1)) > 0)
405 return true;
406 return nonzero_address_p (XEXP (x, 0));
407
408 case PRE_INC:
409 /* Similarly. Further, the offset is always positive. */
410 return true;
411
412 case PRE_DEC:
413 case POST_DEC:
414 case POST_INC:
415 case POST_MODIFY:
416 return nonzero_address_p (XEXP (x, 0));
417
418 case LO_SUM:
419 return nonzero_address_p (XEXP (x, 1));
420
421 default:
422 break;
423 }
424
425 /* If it isn't one of the cases above, it might be zero. */
426 return false;
427 }
428
429 /* Return 1 if X refers to a memory location whose address
430 cannot be compared reliably with constant addresses,
431 or if X refers to a BLKmode memory object.
432 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
433 zero, we are slightly more conservative. */
434
435 bool
436 rtx_addr_varies_p (const_rtx x, bool for_alias)
437 {
438 enum rtx_code code;
439 int i;
440 const char *fmt;
441
442 if (x == 0)
443 return 0;
444
445 code = GET_CODE (x);
446 if (code == MEM)
447 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
448
449 fmt = GET_RTX_FORMAT (code);
450 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
451 if (fmt[i] == 'e')
452 {
453 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
454 return 1;
455 }
456 else if (fmt[i] == 'E')
457 {
458 int j;
459 for (j = 0; j < XVECLEN (x, i); j++)
460 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
461 return 1;
462 }
463 return 0;
464 }
465 \f
466 /* Return the CALL in X if there is one. */
467
468 rtx
469 get_call_rtx_from (rtx x)
470 {
471 if (INSN_P (x))
472 x = PATTERN (x);
473 if (GET_CODE (x) == PARALLEL)
474 x = XVECEXP (x, 0, 0);
475 if (GET_CODE (x) == SET)
476 x = SET_SRC (x);
477 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
478 return x;
479 return NULL_RTX;
480 }
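
/* For example, each of
     (call (mem:QI (symbol_ref "foo")) (const_int 0))
     (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0)))
   and a PARALLEL whose first element is one of the above yields the
   inner CALL rtx; any other pattern yields NULL_RTX.  (Illustrative
   patterns; the modes are target-dependent.)  */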
481 \f
482 /* Return the value of the integer term in X, if one is apparent;
483 otherwise return 0.
484 Only obvious integer terms are detected.
485 This is used in cse.c with the `related_value' field. */
486
487 HOST_WIDE_INT
488 get_integer_term (const_rtx x)
489 {
490 if (GET_CODE (x) == CONST)
491 x = XEXP (x, 0);
492
493 if (GET_CODE (x) == MINUS
494 && CONST_INT_P (XEXP (x, 1)))
495 return - INTVAL (XEXP (x, 1));
496 if (GET_CODE (x) == PLUS
497 && CONST_INT_P (XEXP (x, 1)))
498 return INTVAL (XEXP (x, 1));
499 return 0;
500 }
501
502 /* If X is a constant, return the value sans apparent integer term;
503 otherwise return 0.
504 Only obvious integer terms are detected. */
505
506 rtx
507 get_related_value (const_rtx x)
508 {
509 if (GET_CODE (x) != CONST)
510 return 0;
511 x = XEXP (x, 0);
512 if (GET_CODE (x) == PLUS
513 && CONST_INT_P (XEXP (x, 1)))
514 return XEXP (x, 0);
515 else if (GET_CODE (x) == MINUS
516 && CONST_INT_P (XEXP (x, 1)))
517 return XEXP (x, 0);
518 return 0;
519 }
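
/* For example, with X = (const (plus (symbol_ref "a") (const_int 8))),
   get_integer_term returns 8 and get_related_value returns the
   (symbol_ref "a") operand; with a MINUS, get_integer_term returns the
   negated constant.  */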
520 \f
521 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
522 to somewhere in the same object or object_block as SYMBOL. */
523
524 bool
525 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
526 {
527 tree decl;
528
529 if (GET_CODE (symbol) != SYMBOL_REF)
530 return false;
531
532 if (offset == 0)
533 return true;
534
535 if (offset > 0)
536 {
537 if (CONSTANT_POOL_ADDRESS_P (symbol)
538 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
539 return true;
540
541 decl = SYMBOL_REF_DECL (symbol);
542 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
543 return true;
544 }
545
546 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
547 && SYMBOL_REF_BLOCK (symbol)
548 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
549 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
550 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
551 return true;
552
553 return false;
554 }
555
556 /* Split X into a base and a constant offset, storing them in *BASE_OUT
557 and *OFFSET_OUT respectively. */
558
559 void
560 split_const (rtx x, rtx *base_out, rtx *offset_out)
561 {
562 if (GET_CODE (x) == CONST)
563 {
564 x = XEXP (x, 0);
565 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
566 {
567 *base_out = XEXP (x, 0);
568 *offset_out = XEXP (x, 1);
569 return;
570 }
571 }
572 *base_out = x;
573 *offset_out = const0_rtx;
574 }
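
/* For example, split_const on (const (plus (symbol_ref "a") (const_int -4)))
   stores (symbol_ref "a") in *BASE_OUT and (const_int -4) in *OFFSET_OUT,
   while on a bare (reg:SI 100) it stores the REG itself and const0_rtx.  */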
575 \f
576 /* Return the number of places FIND appears within X. If COUNT_DEST is
577 zero, we do not count occurrences inside the destination of a SET. */
578
579 int
580 count_occurrences (const_rtx x, const_rtx find, int count_dest)
581 {
582 int i, j;
583 enum rtx_code code;
584 const char *format_ptr;
585 int count;
586
587 if (x == find)
588 return 1;
589
590 code = GET_CODE (x);
591
592 switch (code)
593 {
594 case REG:
595 CASE_CONST_ANY:
596 case SYMBOL_REF:
597 case CODE_LABEL:
598 case PC:
599 case CC0:
600 return 0;
601
602 case EXPR_LIST:
603 count = count_occurrences (XEXP (x, 0), find, count_dest);
604 if (XEXP (x, 1))
605 count += count_occurrences (XEXP (x, 1), find, count_dest);
606 return count;
607
608 case MEM:
609 if (MEM_P (find) && rtx_equal_p (x, find))
610 return 1;
611 break;
612
613 case SET:
614 if (SET_DEST (x) == find && ! count_dest)
615 return count_occurrences (SET_SRC (x), find, count_dest);
616 break;
617
618 default:
619 break;
620 }
621
622 format_ptr = GET_RTX_FORMAT (code);
623 count = 0;
624
625 for (i = 0; i < GET_RTX_LENGTH (code); i++)
626 {
627 switch (*format_ptr++)
628 {
629 case 'e':
630 count += count_occurrences (XEXP (x, i), find, count_dest);
631 break;
632
633 case 'E':
634 for (j = 0; j < XVECLEN (x, i); j++)
635 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
636 break;
637 }
638 }
639 return count;
640 }
641
642 \f
643 /* Return TRUE if OP is a register or subreg of a register that
644 holds an unsigned quantity. Otherwise, return FALSE. */
645
646 bool
647 unsigned_reg_p (rtx op)
648 {
649 if (REG_P (op)
650 && REG_EXPR (op)
651 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
652 return true;
653
654 if (GET_CODE (op) == SUBREG
655 && SUBREG_PROMOTED_UNSIGNED_P (op))
656 return true;
657
658 return false;
659 }
660
661 \f
662 /* Nonzero if register REG appears somewhere within IN.
663 Also works if REG is not a register; in this case it checks
664 for a subexpression of IN that is Lisp "equal" to REG. */
665
666 int
667 reg_mentioned_p (const_rtx reg, const_rtx in)
668 {
669 const char *fmt;
670 int i;
671 enum rtx_code code;
672
673 if (in == 0)
674 return 0;
675
676 if (reg == in)
677 return 1;
678
679 if (GET_CODE (in) == LABEL_REF)
680 return reg == XEXP (in, 0);
681
682 code = GET_CODE (in);
683
684 switch (code)
685 {
686 /* Compare registers by number. */
687 case REG:
688 return REG_P (reg) && REGNO (in) == REGNO (reg);
689
690 /* These codes have no constituent expressions
691 and are unique. */
692 case SCRATCH:
693 case CC0:
694 case PC:
695 return 0;
696
697 CASE_CONST_ANY:
698 /* These are kept unique for a given value. */
699 return 0;
700
701 default:
702 break;
703 }
704
705 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
706 return 1;
707
708 fmt = GET_RTX_FORMAT (code);
709
710 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
711 {
712 if (fmt[i] == 'E')
713 {
714 int j;
715 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
716 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
717 return 1;
718 }
719 else if (fmt[i] == 'e'
720 && reg_mentioned_p (reg, XEXP (in, i)))
721 return 1;
722 }
723 return 0;
724 }
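
/* For example, with IN = (set (reg:SI 1) (const_int 0)),
   reg_mentioned_p ((reg:SI 1), IN) is 1 -- unlike reg_referenced_p
   below, stores count as mentions here -- and passing a non-REG such as
   a SYMBOL_REF finds structurally equal subexpressions via rtx_equal_p.  */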
725 \f
726 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
727 no CODE_LABEL insn. */
728
729 int
730 no_labels_between_p (const_rtx beg, const_rtx end)
731 {
732 rtx p;
733 if (beg == end)
734 return 0;
735 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
736 if (LABEL_P (p))
737 return 0;
738 return 1;
739 }
740
741 /* Nonzero if register REG is used in an insn between
742 FROM_INSN and TO_INSN (exclusive of those two). */
743
744 int
745 reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
746 {
747 rtx insn;
748
749 if (from_insn == to_insn)
750 return 0;
751
752 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
753 if (NONDEBUG_INSN_P (insn)
754 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
755 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
756 return 1;
757 return 0;
758 }
759 \f
760 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
761 is entirely replaced by a new value and the only use is as a SET_DEST,
762 we do not consider it a reference. */
763
764 int
765 reg_referenced_p (const_rtx x, const_rtx body)
766 {
767 int i;
768
769 switch (GET_CODE (body))
770 {
771 case SET:
772 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
773 return 1;
774
775 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
776 of a REG that occupies all of the REG, the insn references X if
777 it is mentioned in the destination. */
778 if (GET_CODE (SET_DEST (body)) != CC0
779 && GET_CODE (SET_DEST (body)) != PC
780 && !REG_P (SET_DEST (body))
781 && ! (GET_CODE (SET_DEST (body)) == SUBREG
782 && REG_P (SUBREG_REG (SET_DEST (body)))
783 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
784 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
785 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
786 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
787 && reg_overlap_mentioned_p (x, SET_DEST (body)))
788 return 1;
789 return 0;
790
791 case ASM_OPERANDS:
792 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
793 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
794 return 1;
795 return 0;
796
797 case CALL:
798 case USE:
799 case IF_THEN_ELSE:
800 return reg_overlap_mentioned_p (x, body);
801
802 case TRAP_IF:
803 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
804
805 case PREFETCH:
806 return reg_overlap_mentioned_p (x, XEXP (body, 0));
807
808 case UNSPEC:
809 case UNSPEC_VOLATILE:
810 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
811 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
812 return 1;
813 return 0;
814
815 case PARALLEL:
816 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
817 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
818 return 1;
819 return 0;
820
821 case CLOBBER:
822 if (MEM_P (XEXP (body, 0)))
823 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
824 return 1;
825 return 0;
826
827 case COND_EXEC:
828 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
829 return 1;
830 return reg_referenced_p (x, COND_EXEC_CODE (body));
831
832 default:
833 return 0;
834 }
835 }
836 \f
837 /* Nonzero if register REG is set or clobbered in an insn between
838 FROM_INSN and TO_INSN (exclusive of those two). */
839
840 int
841 reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
842 {
843 const_rtx insn;
844
845 if (from_insn == to_insn)
846 return 0;
847
848 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
849 if (INSN_P (insn) && reg_set_p (reg, insn))
850 return 1;
851 return 0;
852 }
853
854 /* Return nonzero if REG is set or clobbered by INSN; this is the per-insn test used by reg_set_between_p. */
855 int
856 reg_set_p (const_rtx reg, const_rtx insn)
857 {
858 /* We can be passed an insn or part of one. If we are passed an insn,
859 check if a side-effect of the insn clobbers REG. */
860 if (INSN_P (insn)
861 && (FIND_REG_INC_NOTE (insn, reg)
862 || (CALL_P (insn)
863 && ((REG_P (reg)
864 && REGNO (reg) < FIRST_PSEUDO_REGISTER
865 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
866 GET_MODE (reg), REGNO (reg)))
867 || MEM_P (reg)
868 || find_reg_fusage (insn, CLOBBER, reg)))))
869 return 1;
870
871 return set_of (reg, insn) != NULL_RTX;
872 }
873
874 /* Similar to reg_set_between_p, but check all registers in X. Return 0
875 only if none of them are modified between START and END. Return 1 if
876 X contains a MEM; this routine does use memory aliasing. */
877
878 int
879 modified_between_p (const_rtx x, const_rtx start, const_rtx end)
880 {
881 const enum rtx_code code = GET_CODE (x);
882 const char *fmt;
883 int i, j;
884 rtx insn;
885
886 if (start == end)
887 return 0;
888
889 switch (code)
890 {
891 CASE_CONST_ANY:
892 case CONST:
893 case SYMBOL_REF:
894 case LABEL_REF:
895 return 0;
896
897 case PC:
898 case CC0:
899 return 1;
900
901 case MEM:
902 if (modified_between_p (XEXP (x, 0), start, end))
903 return 1;
904 if (MEM_READONLY_P (x))
905 return 0;
906 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
907 if (memory_modified_in_insn_p (x, insn))
908 return 1;
909 return 0;
910 break;
911
912 case REG:
913 return reg_set_between_p (x, start, end);
914
915 default:
916 break;
917 }
918
919 fmt = GET_RTX_FORMAT (code);
920 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
921 {
922 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
923 return 1;
924
925 else if (fmt[i] == 'E')
926 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
927 if (modified_between_p (XVECEXP (x, i, j), start, end))
928 return 1;
929 }
930
931 return 0;
932 }
933
934 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
935 of them are modified in INSN. Return 1 if X contains a MEM; this routine
936 does use memory aliasing. */
937
938 int
939 modified_in_p (const_rtx x, const_rtx insn)
940 {
941 const enum rtx_code code = GET_CODE (x);
942 const char *fmt;
943 int i, j;
944
945 switch (code)
946 {
947 CASE_CONST_ANY:
948 case CONST:
949 case SYMBOL_REF:
950 case LABEL_REF:
951 return 0;
952
953 case PC:
954 case CC0:
955 return 1;
956
957 case MEM:
958 if (modified_in_p (XEXP (x, 0), insn))
959 return 1;
960 if (MEM_READONLY_P (x))
961 return 0;
962 if (memory_modified_in_insn_p (x, insn))
963 return 1;
964 return 0;
965 break;
966
967 case REG:
968 return reg_set_p (x, insn);
969
970 default:
971 break;
972 }
973
974 fmt = GET_RTX_FORMAT (code);
975 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
976 {
977 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
978 return 1;
979
980 else if (fmt[i] == 'E')
981 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
982 if (modified_in_p (XVECEXP (x, i, j), insn))
983 return 1;
984 }
985
986 return 0;
987 }
988 \f
989 /* Helper function for set_of. */
990 struct set_of_data
991 {
992 const_rtx found;
993 const_rtx pat;
994 };
995
996 static void
997 set_of_1 (rtx x, const_rtx pat, void *data1)
998 {
999 struct set_of_data *const data = (struct set_of_data *) (data1);
1000 if (rtx_equal_p (x, data->pat)
1001 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1002 data->found = pat;
1003 }
1004
1005 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1006 (either directly or via STRICT_LOW_PART and similar modifiers). */
1007 const_rtx
1008 set_of (const_rtx pat, const_rtx insn)
1009 {
1010 struct set_of_data data;
1011 data.found = NULL_RTX;
1012 data.pat = pat;
1013 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1014 return data.found;
1015 }
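
/* For example, given an insn whose pattern is
     (parallel [(set (reg:SI 1) (reg:SI 2))
                (clobber (reg:SI 3))])
   set_of returns the SET for (reg:SI 1) and the CLOBBER for (reg:SI 3),
   but NULL_RTX for (reg:SI 2), which is only used as a source.  */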
1016
1017 /* This function, called through note_stores, collects sets and
1018 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1019 by DATA. */
1020 void
1021 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1022 {
1023 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1024 if (REG_P (x) && HARD_REGISTER_P (x))
1025 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1026 }
1027
1028 /* Examine INSN, and compute the set of hard registers written by it.
1029 Store it in *PSET. Should only be called after reload. */
1030 void
1031 find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
1032 {
1033 rtx link;
1034
1035 CLEAR_HARD_REG_SET (*pset);
1036 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1037 if (CALL_P (insn))
1038 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1039 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1040 if (REG_NOTE_KIND (link) == REG_INC)
1041 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1042 }
1043
1044 /* A for_each_rtx subroutine of record_hard_reg_uses. */
1045 static int
1046 record_hard_reg_uses_1 (rtx *px, void *data)
1047 {
1048 rtx x = *px;
1049 HARD_REG_SET *pused = (HARD_REG_SET *)data;
1050
1051 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1052 {
1053 int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
1054 while (nregs-- > 0)
1055 SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
1056 }
1057 return 0;
1058 }
1059
1060 /* Like record_hard_reg_sets, but called through note_uses. */
1061 void
1062 record_hard_reg_uses (rtx *px, void *data)
1063 {
1064 for_each_rtx (px, record_hard_reg_uses_1, data);
1065 }
1066 \f
1067 /* Given an INSN, return a SET expression if this insn has only a single SET.
1068 It may also have CLOBBERs, USEs, or SETs whose output
1069 will not be used, which we ignore. */
1070
1071 rtx
1072 single_set_2 (const_rtx insn, const_rtx pat)
1073 {
1074 rtx set = NULL;
1075 int set_verified = 1;
1076 int i;
1077
1078 if (GET_CODE (pat) == PARALLEL)
1079 {
1080 for (i = 0; i < XVECLEN (pat, 0); i++)
1081 {
1082 rtx sub = XVECEXP (pat, 0, i);
1083 switch (GET_CODE (sub))
1084 {
1085 case USE:
1086 case CLOBBER:
1087 break;
1088
1089 case SET:
1090 /* We can consider insns having multiple sets, where all
1091 but one are dead, as single set insns. In the common case
1092 only one set is present in the pattern, so we want
1093 to avoid checking for REG_UNUSED notes unless necessary.
1094
1095 When we reach a set the first time, we just expect it to be
1096 the single set we are looking for; only when more
1097 sets are found in the insn do we check them. */
1098 if (!set_verified)
1099 {
1100 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1101 && !side_effects_p (set))
1102 set = NULL;
1103 else
1104 set_verified = 1;
1105 }
1106 if (!set)
1107 set = sub, set_verified = 0;
1108 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1109 || side_effects_p (sub))
1110 return NULL_RTX;
1111 break;
1112
1113 default:
1114 return NULL_RTX;
1115 }
1116 }
1117 }
1118 return set;
1119 }
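
/* For example,
     (parallel [(set (reg:SI 1) (reg:SI 2))
                (clobber (reg:SI 3))])
   counts as a single set, as does a two-SET PARALLEL in which the
   second SET's destination carries a REG_UNUSED note and has no side
   effects; a PARALLEL with two live SETs does not.  */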
1120
1121 /* Given an INSN, return nonzero if it has more than one SET, else return
1122 zero. */
1123
1124 int
1125 multiple_sets (const_rtx insn)
1126 {
1127 int found;
1128 int i;
1129
1130 /* INSN must be an insn. */
1131 if (! INSN_P (insn))
1132 return 0;
1133
1134 /* Only a PARALLEL can have multiple SETs. */
1135 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1136 {
1137 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1138 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1139 {
1140 /* If we have already found a SET, then return now. */
1141 if (found)
1142 return 1;
1143 else
1144 found = 1;
1145 }
1146 }
1147
1148 /* Either zero or one SET. */
1149 return 0;
1150 }
1151 \f
1152 /* Return nonzero if the destination of SET equals the source
1153 and there are no side effects. */
1154
1155 int
1156 set_noop_p (const_rtx set)
1157 {
1158 rtx src = SET_SRC (set);
1159 rtx dst = SET_DEST (set);
1160
1161 if (dst == pc_rtx && src == pc_rtx)
1162 return 1;
1163
1164 if (MEM_P (dst) && MEM_P (src))
1165 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1166
1167 if (GET_CODE (dst) == ZERO_EXTRACT)
1168 return rtx_equal_p (XEXP (dst, 0), src)
1169 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1170 && !side_effects_p (src);
1171
1172 if (GET_CODE (dst) == STRICT_LOW_PART)
1173 dst = XEXP (dst, 0);
1174
1175 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1176 {
1177 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1178 return 0;
1179 src = SUBREG_REG (src);
1180 dst = SUBREG_REG (dst);
1181 }
1182
1183 return (REG_P (src) && REG_P (dst)
1184 && REGNO (src) == REGNO (dst));
1185 }
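
/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op, as is a copy
   between SUBREGs of the same register at the same SUBREG_BYTE; a
   volatile MEM copied to itself is not, because of its side effects.  */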
1186 \f
1187 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1188 value to itself. */
1189
1190 int
1191 noop_move_p (const_rtx insn)
1192 {
1193 rtx pat = PATTERN (insn);
1194
1195 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1196 return 1;
1197
1198 /* Insns carrying these notes are useful later on. */
1199 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1200 return 0;
1201
1202 /* Check the code to be executed for COND_EXEC. */
1203 if (GET_CODE (pat) == COND_EXEC)
1204 pat = COND_EXEC_CODE (pat);
1205
1206 if (GET_CODE (pat) == SET && set_noop_p (pat))
1207 return 1;
1208
1209 if (GET_CODE (pat) == PARALLEL)
1210 {
1211 int i;
1212 /* If nothing but SETs of registers to themselves,
1213 this insn can also be deleted. */
1214 for (i = 0; i < XVECLEN (pat, 0); i++)
1215 {
1216 rtx tem = XVECEXP (pat, 0, i);
1217
1218 if (GET_CODE (tem) == USE
1219 || GET_CODE (tem) == CLOBBER)
1220 continue;
1221
1222 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1223 return 0;
1224 }
1225
1226 return 1;
1227 }
1228 return 0;
1229 }
1230 \f
1231
1232 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1233 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1234 If the object was modified, if we hit a partial assignment to X, or hit a
1235 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1236 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1237 be the src. */
1238
1239 rtx
1240 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1241 {
1242 rtx p;
1243
1244 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1245 p = PREV_INSN (p))
1246 if (INSN_P (p))
1247 {
1248 rtx set = single_set (p);
1249 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1250
1251 if (set && rtx_equal_p (x, SET_DEST (set)))
1252 {
1253 rtx src = SET_SRC (set);
1254
1255 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1256 src = XEXP (note, 0);
1257
1258 if ((valid_to == NULL_RTX
1259 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1260 /* Reject hard registers because we don't usually want
1261 to use them; we'd rather use a pseudo. */
1262 && (! (REG_P (src)
1263 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1264 {
1265 *pinsn = p;
1266 return src;
1267 }
1268 }
1269
1270 /* If set in non-simple way, we don't have a value. */
1271 if (reg_set_p (x, p))
1272 break;
1273 }
1274
1275 return x;
1276 }
1277 \f
1278 /* Return nonzero if register in range [REGNO, ENDREGNO)
1279 appears either explicitly or implicitly in X
1280 other than being stored into.
1281
1282 References contained within the substructure at LOC do not count.
1283 LOC may be zero, meaning don't ignore anything. */
1284
1285 int
1286 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1287 rtx *loc)
1288 {
1289 int i;
1290 unsigned int x_regno;
1291 RTX_CODE code;
1292 const char *fmt;
1293
1294 repeat:
1295 /* The contents of a REG_NONNEG note are always zero, so we must come here
1296 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1297 if (x == 0)
1298 return 0;
1299
1300 code = GET_CODE (x);
1301
1302 switch (code)
1303 {
1304 case REG:
1305 x_regno = REGNO (x);
1306
1307 /* If we are modifying the stack, frame, or argument pointer, it will
1308 clobber a virtual register. In fact, we could be more precise,
1309 but it isn't worth it. */
1310 if ((x_regno == STACK_POINTER_REGNUM
1311 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1312 || x_regno == ARG_POINTER_REGNUM
1313 #endif
1314 || x_regno == FRAME_POINTER_REGNUM)
1315 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1316 return 1;
1317
1318 return endregno > x_regno && regno < END_REGNO (x);
1319
1320 case SUBREG:
1321 /* If this is a SUBREG of a hard reg, we can see exactly which
1322 registers are being modified. Otherwise, handle normally. */
1323 if (REG_P (SUBREG_REG (x))
1324 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1325 {
1326 unsigned int inner_regno = subreg_regno (x);
1327 unsigned int inner_endregno
1328 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1329 ? subreg_nregs (x) : 1);
1330
1331 return endregno > inner_regno && regno < inner_endregno;
1332 }
1333 break;
1334
1335 case CLOBBER:
1336 case SET:
1337 if (&SET_DEST (x) != loc
1338 /* Note setting a SUBREG counts as referring to the REG it is in for
1339 a pseudo but not for hard registers since we can
1340 treat each word individually. */
1341 && ((GET_CODE (SET_DEST (x)) == SUBREG
1342 && loc != &SUBREG_REG (SET_DEST (x))
1343 && REG_P (SUBREG_REG (SET_DEST (x)))
1344 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1345 && refers_to_regno_p (regno, endregno,
1346 SUBREG_REG (SET_DEST (x)), loc))
1347 || (!REG_P (SET_DEST (x))
1348 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1349 return 1;
1350
1351 if (code == CLOBBER || loc == &SET_SRC (x))
1352 return 0;
1353 x = SET_SRC (x);
1354 goto repeat;
1355
1356 default:
1357 break;
1358 }
1359
1360 /* X does not match, so try its subexpressions. */
1361
1362 fmt = GET_RTX_FORMAT (code);
1363 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1364 {
1365 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1366 {
1367 if (i == 0)
1368 {
1369 x = XEXP (x, 0);
1370 goto repeat;
1371 }
1372 else
1373 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1374 return 1;
1375 }
1376 else if (fmt[i] == 'E')
1377 {
1378 int j;
1379 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1380 if (loc != &XVECEXP (x, i, j)
1381 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1382 return 1;
1383 }
1384 }
1385 return 0;
1386 }
1387
1388 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1389 we check if any register number in X conflicts with the relevant register
1390 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1391 contains a MEM (we don't bother checking for memory addresses that can't
1392 conflict because we expect this to be a rare case). */
1393
1394 int
1395 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1396 {
1397 unsigned int regno, endregno;
1398
1399 /* If either argument is a constant, then modifying X cannot
1400 affect IN. We check IN here; the CONSTANT_P (x) test is
1401 profitably combined with the switch statement below. */
1402 if (CONSTANT_P (in))
1403 return 0;
1404
1405 recurse:
1406 switch (GET_CODE (x))
1407 {
1408 case STRICT_LOW_PART:
1409 case ZERO_EXTRACT:
1410 case SIGN_EXTRACT:
1411 /* Overly conservative. */
1412 x = XEXP (x, 0);
1413 goto recurse;
1414
1415 case SUBREG:
1416 regno = REGNO (SUBREG_REG (x));
1417 if (regno < FIRST_PSEUDO_REGISTER)
1418 regno = subreg_regno (x);
1419 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1420 ? subreg_nregs (x) : 1);
1421 goto do_reg;
1422
1423 case REG:
1424 regno = REGNO (x);
1425 endregno = END_REGNO (x);
1426 do_reg:
1427 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1428
1429 case MEM:
1430 {
1431 const char *fmt;
1432 int i;
1433
1434 if (MEM_P (in))
1435 return 1;
1436
1437 fmt = GET_RTX_FORMAT (GET_CODE (in));
1438 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1439 if (fmt[i] == 'e')
1440 {
1441 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1442 return 1;
1443 }
1444 else if (fmt[i] == 'E')
1445 {
1446 int j;
1447 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1448 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1449 return 1;
1450 }
1451
1452 return 0;
1453 }
1454
1455 case SCRATCH:
1456 case PC:
1457 case CC0:
1458 return reg_mentioned_p (x, in);
1459
1460 case PARALLEL:
1461 {
1462 int i;
1463
1464 /* If any register in here refers to it we return true. */
1465 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1466 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1467 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1468 return 1;
1469 return 0;
1470 }
1471
1472 default:
1473 gcc_assert (CONSTANT_P (x));
1474 return 0;
1475 }
1476 }
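
/* For example, on a target where a DImode value in hard register 0 also
   occupies hard register 1, modifying (reg:DI 0) overlaps a use of
   (reg:SI 1), so reg_overlap_mentioned_p returns 1 even though the
   register numbers differ.  (Illustrative; register geometry is
   target-dependent.)  */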
1477 \f
1478 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1479 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1480 ignored by note_stores, but passed to FUN.
1481
1482 FUN receives three arguments:
1483 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1484 2. the SET or CLOBBER rtx that does the store,
1485 3. the pointer DATA provided to note_stores.
1486
1487 If the item being stored in or clobbered is a SUBREG of a hard register,
1488 the SUBREG will be passed. */
1489
1490 void
1491 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1492 {
1493 int i;
1494
1495 if (GET_CODE (x) == COND_EXEC)
1496 x = COND_EXEC_CODE (x);
1497
1498 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1499 {
1500 rtx dest = SET_DEST (x);
1501
1502 while ((GET_CODE (dest) == SUBREG
1503 && (!REG_P (SUBREG_REG (dest))
1504 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1505 || GET_CODE (dest) == ZERO_EXTRACT
1506 || GET_CODE (dest) == STRICT_LOW_PART)
1507 dest = XEXP (dest, 0);
1508
1509 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1510 each of whose first operand is a register. */
1511 if (GET_CODE (dest) == PARALLEL)
1512 {
1513 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1514 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1515 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1516 }
1517 else
1518 (*fun) (dest, x, data);
1519 }
1520
1521 else if (GET_CODE (x) == PARALLEL)
1522 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1523 note_stores (XVECEXP (x, 0, i), fun, data);
1524 }
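
/* A minimal usage sketch (hypothetical caller, not part of this file):

     static void
     count_reg_stores (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x))
         ++*(int *) data;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_reg_stores, &n);

   record_hard_reg_sets above is a real callback of exactly this shape.  */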
1525 \f
1526 /* Like note_stores, but call FUN for each expression that is being
1527 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1528 FUN for each expression, not any interior subexpressions. FUN receives a
1529 pointer to the expression and the DATA passed to this function.
1530
1531 Note that this is not quite the same test as that done in reg_referenced_p
1532 since that considers something as being referenced if it is being
1533 partially set, while we do not. */
1534
1535 void
1536 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1537 {
1538 rtx body = *pbody;
1539 int i;
1540
1541 switch (GET_CODE (body))
1542 {
1543 case COND_EXEC:
1544 (*fun) (&COND_EXEC_TEST (body), data);
1545 note_uses (&COND_EXEC_CODE (body), fun, data);
1546 return;
1547
1548 case PARALLEL:
1549 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1550 note_uses (&XVECEXP (body, 0, i), fun, data);
1551 return;
1552
1553 case SEQUENCE:
1554 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1555 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1556 return;
1557
1558 case USE:
1559 (*fun) (&XEXP (body, 0), data);
1560 return;
1561
1562 case ASM_OPERANDS:
1563 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1564 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1565 return;
1566
1567 case TRAP_IF:
1568 (*fun) (&TRAP_CONDITION (body), data);
1569 return;
1570
1571 case PREFETCH:
1572 (*fun) (&XEXP (body, 0), data);
1573 return;
1574
1575 case UNSPEC:
1576 case UNSPEC_VOLATILE:
1577 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1578 (*fun) (&XVECEXP (body, 0, i), data);
1579 return;
1580
1581 case CLOBBER:
1582 if (MEM_P (XEXP (body, 0)))
1583 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1584 return;
1585
1586 case SET:
1587 {
1588 rtx dest = SET_DEST (body);
1589
1590 /* For sets we visit everything in the source, plus registers in the
1591 memory expression of the destination and the operands of a ZERO_EXTRACT. */
1592 (*fun) (&SET_SRC (body), data);
1593
1594 if (GET_CODE (dest) == ZERO_EXTRACT)
1595 {
1596 (*fun) (&XEXP (dest, 1), data);
1597 (*fun) (&XEXP (dest, 2), data);
1598 }
1599
1600 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1601 dest = XEXP (dest, 0);
1602
1603 if (MEM_P (dest))
1604 (*fun) (&XEXP (dest, 0), data);
1605 }
1606 return;
1607
1608 default:
1609 /* All the other possibilities never store. */
1610 (*fun) (pbody, data);
1611 return;
1612 }
1613 }
1614 \f
1615 /* Return nonzero if X's old contents don't survive after INSN.
1616 This will be true if X is (cc0) or if X is a register and
1617 X dies in INSN or because INSN entirely sets X.
1618
1619 "Entirely set" means set directly and not through a SUBREG, or
1620 ZERO_EXTRACT, so no trace of the old contents remains.
1621 Likewise, REG_INC does not count.
1622
1623 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1624 but for this use that makes no difference, since regs don't overlap
1625 during their lifetimes. Therefore, this function may be used
1626 at any time after deaths have been computed.
1627
1628 If REG is a hard reg that occupies multiple machine registers, this
1629 function will only return 1 if each of those registers will be replaced
1630 by INSN. */
1631
1632 int
1633 dead_or_set_p (const_rtx insn, const_rtx x)
1634 {
1635 unsigned int regno, end_regno;
1636 unsigned int i;
1637
1638 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1639 if (GET_CODE (x) == CC0)
1640 return 1;
1641
1642 gcc_assert (REG_P (x));
1643
1644 regno = REGNO (x);
1645 end_regno = END_REGNO (x);
1646 for (i = regno; i < end_regno; i++)
1647 if (! dead_or_set_regno_p (insn, i))
1648 return 0;
1649
1650 return 1;
1651 }
1652
1653 /* Return TRUE iff DEST is a register or subreg of a register and
1654 doesn't change the number of words of the inner register, and any
1655 part of the register is TEST_REGNO. */
1656
1657 static bool
1658 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1659 {
1660 unsigned int regno, endregno;
1661
1662 if (GET_CODE (dest) == SUBREG
1663 && (((GET_MODE_SIZE (GET_MODE (dest))
1664 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1665 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1666 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1667 dest = SUBREG_REG (dest);
1668
1669 if (!REG_P (dest))
1670 return false;
1671
1672 regno = REGNO (dest);
1673 endregno = END_REGNO (dest);
1674 return (test_regno >= regno && test_regno < endregno);
1675 }
1676
1677 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1678 any member matches the covers_regno_no_parallel_p criteria. */
1679
1680 static bool
1681 covers_regno_p (const_rtx dest, unsigned int test_regno)
1682 {
1683 if (GET_CODE (dest) == PARALLEL)
1684 {
1685 /* Some targets place small structures in registers for return
1686 values of functions, and those registers are wrapped in
1687 PARALLELs that we may see as the destination of a SET. */
1688 int i;
1689
1690 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1691 {
1692 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1693 if (inner != NULL_RTX
1694 && covers_regno_no_parallel_p (inner, test_regno))
1695 return true;
1696 }
1697
1698 return false;
1699 }
1700 else
1701 return covers_regno_no_parallel_p (dest, test_regno);
1702 }
1703
1704 /* Utility function for dead_or_set_p to check an individual register. */
1705
1706 int
1707 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1708 {
1709 const_rtx pattern;
1710
1711 /* See if there is a death note for something that includes TEST_REGNO. */
1712 if (find_regno_note (insn, REG_DEAD, test_regno))
1713 return 1;
1714
1715 if (CALL_P (insn)
1716 && find_regno_fusage (insn, CLOBBER, test_regno))
1717 return 1;
1718
1719 pattern = PATTERN (insn);
1720
1721 /* If a COND_EXEC is not executed, the value survives. */
1722 if (GET_CODE (pattern) == COND_EXEC)
1723 return 0;
1724
1725 if (GET_CODE (pattern) == SET)
1726 return covers_regno_p (SET_DEST (pattern), test_regno);
1727 else if (GET_CODE (pattern) == PARALLEL)
1728 {
1729 int i;
1730
1731 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1732 {
1733 rtx body = XVECEXP (pattern, 0, i);
1734
1735 if (GET_CODE (body) == COND_EXEC)
1736 body = COND_EXEC_CODE (body);
1737
1738 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1739 && covers_regno_p (SET_DEST (body), test_regno))
1740 return 1;
1741 }
1742 }
1743
1744 return 0;
1745 }
1746
1747 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1748 If DATUM is nonzero, look for one whose datum is DATUM. */
1749
1750 rtx
1751 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1752 {
1753 rtx link;
1754
1755 gcc_checking_assert (insn);
1756
1757 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1758 if (! INSN_P (insn))
1759 return 0;
1760 if (datum == 0)
1761 {
1762 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1763 if (REG_NOTE_KIND (link) == kind)
1764 return link;
1765 return 0;
1766 }
1767
1768 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1769 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1770 return link;
1771 return 0;
1772 }
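
/* A typical use (sketch): after
     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
   a nonnull NOTE carries the known-equal value in XEXP (note, 0).  */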
1773
1774 /* Return the reg-note of kind KIND in insn INSN which applies to register
1775 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1776 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1777 it might be the case that the note overlaps REGNO. */
1778
1779 rtx
1780 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1781 {
1782 rtx link;
1783
1784 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1785 if (! INSN_P (insn))
1786 return 0;
1787
1788 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1789 if (REG_NOTE_KIND (link) == kind
1790 /* Verify that it is a register, so that scratch and MEM won't cause a
1791 problem here. */
1792 && REG_P (XEXP (link, 0))
1793 && REGNO (XEXP (link, 0)) <= regno
1794 && END_REGNO (XEXP (link, 0)) > regno)
1795 return link;
1796 return 0;
1797 }
1798
1799 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1800 has such a note. */
1801
1802 rtx
1803 find_reg_equal_equiv_note (const_rtx insn)
1804 {
1805 rtx link;
1806
1807 if (!INSN_P (insn))
1808 return 0;
1809
1810 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1811 if (REG_NOTE_KIND (link) == REG_EQUAL
1812 || REG_NOTE_KIND (link) == REG_EQUIV)
1813 {
1814 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1815 insns that have multiple sets. Checking single_set to
1816 make sure of this is not the proper check, as explained
1817 in the comment in set_unique_reg_note.
1818
1819 This should be changed into an assert. */
1820 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1821 return 0;
1822 return link;
1823 }
1824 return NULL;
1825 }
1826
1827 /* Check whether INSN is a single_set whose source is known to be
1828 equivalent to a constant. Return that constant if so, otherwise
1829 return null. */
1830
1831 rtx
1832 find_constant_src (const_rtx insn)
1833 {
1834 rtx note, set, x;
1835
1836 set = single_set (insn);
1837 if (set)
1838 {
1839 x = avoid_constant_pool_reference (SET_SRC (set));
1840 if (CONSTANT_P (x))
1841 return x;
1842 }
1843
1844 note = find_reg_equal_equiv_note (insn);
1845 if (note && CONSTANT_P (XEXP (note, 0)))
1846 return XEXP (note, 0);
1847
1848 return NULL_RTX;
1849 }
1850
1851 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1852 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1853
1854 int
1855 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1856 {
1857 /* If it's not a CALL_INSN, it can't possibly have a
1858 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1859 if (!CALL_P (insn))
1860 return 0;
1861
1862 gcc_assert (datum);
1863
1864 if (!REG_P (datum))
1865 {
1866 rtx link;
1867
1868 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1869 link;
1870 link = XEXP (link, 1))
1871 if (GET_CODE (XEXP (link, 0)) == code
1872 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1873 return 1;
1874 }
1875 else
1876 {
1877 unsigned int regno = REGNO (datum);
1878
1879 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1880 to pseudo registers, so don't bother checking. */
1881
1882 if (regno < FIRST_PSEUDO_REGISTER)
1883 {
1884 unsigned int end_regno = END_HARD_REGNO (datum);
1885 unsigned int i;
1886
1887 for (i = regno; i < end_regno; i++)
1888 if (find_regno_fusage (insn, code, i))
1889 return 1;
1890 }
1891 }
1892
1893 return 0;
1894 }
1895
1896 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1897 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1898
1899 int
1900 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
1901 {
1902 rtx link;
1903
1904 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1905 to pseudo registers, so don't bother checking. */
1906
1907 if (regno >= FIRST_PSEUDO_REGISTER
1908 || !CALL_P (insn))
1909 return 0;
1910
1911 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1912 {
1913 rtx op, reg;
1914
1915 if (GET_CODE (op = XEXP (link, 0)) == code
1916 && REG_P (reg = XEXP (op, 0))
1917 && REGNO (reg) <= regno
1918 && END_HARD_REGNO (reg) > regno)
1919 return 1;
1920 }
1921
1922 return 0;
1923 }
1924
1925 \f
1926 /* Allocate a register note with kind KIND and datum DATUM. LIST is
1927 stored as the pointer to the next register note. */
1928
1929 rtx
1930 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
1931 {
1932 rtx note;
1933
1934 switch (kind)
1935 {
1936 case REG_CC_SETTER:
1937 case REG_CC_USER:
1938 case REG_LABEL_TARGET:
1939 case REG_LABEL_OPERAND:
1940 case REG_TM:
1941 /* These types of register notes use an INSN_LIST rather than an
1942 EXPR_LIST, so that copying is done right and dumps look
1943 better. */
1944 note = alloc_INSN_LIST (datum, list);
1945 PUT_REG_NOTE_KIND (note, kind);
1946 break;
1947
1948 default:
1949 note = alloc_EXPR_LIST (kind, datum, list);
1950 break;
1951 }
1952
1953 return note;
1954 }
1955
1956 /* Add register note with kind KIND and datum DATUM to INSN. */
1957
1958 void
1959 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
1960 {
1961 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
1962 }
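
/* For example, add_reg_note (insn, REG_EQUAL, GEN_INT (42)) records
   that the destination of INSN's single set is known to equal 42.
   (Sketch; the caller must ensure the note kind is valid for INSN.)  */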
1963
1964 /* Remove register note NOTE from the REG_NOTES of INSN. */
1965
1966 void
1967 remove_note (rtx insn, const_rtx note)
1968 {
1969 rtx link;
1970
1971 if (note == NULL_RTX)
1972 return;
1973
1974 if (REG_NOTES (insn) == note)
1975 REG_NOTES (insn) = XEXP (note, 1);
1976 else
1977 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1978 if (XEXP (link, 1) == note)
1979 {
1980 XEXP (link, 1) = XEXP (note, 1);
1981 break;
1982 }
1983
1984 switch (REG_NOTE_KIND (note))
1985 {
1986 case REG_EQUAL:
1987 case REG_EQUIV:
1988 df_notes_rescan (insn);
1989 break;
1990 default:
1991 break;
1992 }
1993 }
1994
1995 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
1996
1997 void
1998 remove_reg_equal_equiv_notes (rtx insn)
1999 {
2000 rtx *loc;
2001
2002 loc = &REG_NOTES (insn);
2003 while (*loc)
2004 {
2005 enum reg_note kind = REG_NOTE_KIND (*loc);
2006 if (kind == REG_EQUAL || kind == REG_EQUIV)
2007 *loc = XEXP (*loc, 1);
2008 else
2009 loc = &XEXP (*loc, 1);
2010 }
2011 }
2012
2013 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2014
2015 void
2016 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2017 {
2018 df_ref eq_use;
2019
2020 if (!df)
2021 return;
2022
2023 /* This loop is a little tricky. We cannot just go down the chain because
2024 it is being modified by some actions in the loop. So we just iterate
2025 over the head. We plan to drain the list anyway. */
2026 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2027 {
2028 rtx insn = DF_REF_INSN (eq_use);
2029 rtx note = find_reg_equal_equiv_note (insn);
2030
2031 /* This assert is generally triggered when someone deletes a REG_EQUAL
2032 or REG_EQUIV note by hacking the list manually rather than calling
2033 remove_note. */
2034 gcc_assert (note);
2035
2036 remove_note (insn, note);
2037 }
2038 }
2039
2040 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2041 return 1 if it is found. A simple equality test is used to determine if
2042 NODE matches. */
2043
2044 int
2045 in_expr_list_p (const_rtx listp, const_rtx node)
2046 {
2047 const_rtx x;
2048
2049 for (x = listp; x; x = XEXP (x, 1))
2050 if (node == XEXP (x, 0))
2051 return 1;
2052
2053 return 0;
2054 }
2055
2056 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2057 remove that entry from the list if it is found.
2058
2059 A simple equality test is used to determine if NODE matches. */
2060
2061 void
2062 remove_node_from_expr_list (const_rtx node, rtx *listp)
2063 {
2064 rtx temp = *listp;
2065 rtx prev = NULL_RTX;
2066
2067 while (temp)
2068 {
2069 if (node == XEXP (temp, 0))
2070 {
2071 /* Splice the node out of the list. */
2072 if (prev)
2073 XEXP (prev, 1) = XEXP (temp, 1);
2074 else
2075 *listp = XEXP (temp, 1);
2076
2077 return;
2078 }
2079
2080 prev = temp;
2081 temp = XEXP (temp, 1);
2082 }
2083 }
2084 \f
2085 /* Nonzero if X contains any volatile instructions. These are instructions
2086 which may cause unpredictable machine state, and thus no
2087 instructions or register uses should be moved or combined across them.
2088 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2089
2090 int
2091 volatile_insn_p (const_rtx x)
2092 {
2093 const RTX_CODE code = GET_CODE (x);
2094 switch (code)
2095 {
2096 case LABEL_REF:
2097 case SYMBOL_REF:
2098 case CONST:
2099 CASE_CONST_ANY:
2100 case CC0:
2101 case PC:
2102 case REG:
2103 case SCRATCH:
2104 case CLOBBER:
2105 case ADDR_VEC:
2106 case ADDR_DIFF_VEC:
2107 case CALL:
2108 case MEM:
2109 return 0;
2110
2111 case UNSPEC_VOLATILE:
2112 return 1;
2113
2114 case ASM_INPUT:
2115 case ASM_OPERANDS:
2116 if (MEM_VOLATILE_P (x))
2117 return 1;
2118
2119 default:
2120 break;
2121 }
2122
2123 /* Recursively scan the operands of this expression. */
2124
2125 {
2126 const char *const fmt = GET_RTX_FORMAT (code);
2127 int i;
2128
2129 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2130 {
2131 if (fmt[i] == 'e')
2132 {
2133 if (volatile_insn_p (XEXP (x, i)))
2134 return 1;
2135 }
2136 else if (fmt[i] == 'E')
2137 {
2138 int j;
2139 for (j = 0; j < XVECLEN (x, i); j++)
2140 if (volatile_insn_p (XVECEXP (x, i, j)))
2141 return 1;
2142 }
2143 }
2144 }
2145 return 0;
2146 }
2147
2148 /* Nonzero if X contains any volatile memory references,
2149    UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
2150
2151 int
2152 volatile_refs_p (const_rtx x)
2153 {
2154 const RTX_CODE code = GET_CODE (x);
2155 switch (code)
2156 {
2157 case LABEL_REF:
2158 case SYMBOL_REF:
2159 case CONST:
2160 CASE_CONST_ANY:
2161 case CC0:
2162 case PC:
2163 case REG:
2164 case SCRATCH:
2165 case CLOBBER:
2166 case ADDR_VEC:
2167 case ADDR_DIFF_VEC:
2168 return 0;
2169
2170 case UNSPEC_VOLATILE:
2171 return 1;
2172
2173 case MEM:
2174 case ASM_INPUT:
2175 case ASM_OPERANDS:
2176 if (MEM_VOLATILE_P (x))
2177 return 1;
2178
2179 default:
2180 break;
2181 }
2182
2183 /* Recursively scan the operands of this expression. */
2184
2185 {
2186 const char *const fmt = GET_RTX_FORMAT (code);
2187 int i;
2188
2189 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2190 {
2191 if (fmt[i] == 'e')
2192 {
2193 if (volatile_refs_p (XEXP (x, i)))
2194 return 1;
2195 }
2196 else if (fmt[i] == 'E')
2197 {
2198 int j;
2199 for (j = 0; j < XVECLEN (x, i); j++)
2200 if (volatile_refs_p (XVECEXP (x, i, j)))
2201 return 1;
2202 }
2203 }
2204 }
2205 return 0;
2206 }
2207
2208 /* Similar to above, except that it also rejects register pre- and post-
2209 incrementing. */
2210
2211 int
2212 side_effects_p (const_rtx x)
2213 {
2214 const RTX_CODE code = GET_CODE (x);
2215 switch (code)
2216 {
2217 case LABEL_REF:
2218 case SYMBOL_REF:
2219 case CONST:
2220 CASE_CONST_ANY:
2221 case CC0:
2222 case PC:
2223 case REG:
2224 case SCRATCH:
2225 case ADDR_VEC:
2226 case ADDR_DIFF_VEC:
2227 case VAR_LOCATION:
2228 return 0;
2229
2230 case CLOBBER:
2231 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2232 when some combination can't be done. If we see one, don't think
2233 that we can simplify the expression. */
2234 return (GET_MODE (x) != VOIDmode);
2235
2236 case PRE_INC:
2237 case PRE_DEC:
2238 case POST_INC:
2239 case POST_DEC:
2240 case PRE_MODIFY:
2241 case POST_MODIFY:
2242 case CALL:
2243 case UNSPEC_VOLATILE:
2244 return 1;
2245
2246 case MEM:
2247 case ASM_INPUT:
2248 case ASM_OPERANDS:
2249 if (MEM_VOLATILE_P (x))
2250 return 1;
2251
2252 default:
2253 break;
2254 }
2255
2256 /* Recursively scan the operands of this expression. */
2257
2258 {
2259 const char *fmt = GET_RTX_FORMAT (code);
2260 int i;
2261
2262 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2263 {
2264 if (fmt[i] == 'e')
2265 {
2266 if (side_effects_p (XEXP (x, i)))
2267 return 1;
2268 }
2269 else if (fmt[i] == 'E')
2270 {
2271 int j;
2272 for (j = 0; j < XVECLEN (x, i); j++)
2273 if (side_effects_p (XVECEXP (x, i, j)))
2274 return 1;
2275 }
2276 }
2277 }
2278 return 0;
2279 }
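
/* A typical conservative guard built on the predicates above (editor's
   sketch; the predicate name is hypothetical): an expression may only
   be moved freely if it has no side effects and cannot trap.

     static bool
     safe_to_move_p (rtx x)
     {
       return !side_effects_p (x) && !may_trap_p (x);
     }
*/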
2280 \f
2281 /* Return nonzero if evaluating rtx X might cause a trap.
2282    FLAGS controls how to consider MEMs.  A nonzero FLAGS means the context
2283    of the access may have changed from the original, such that the address
2284    may have become invalid.  */
2285
2286 int
2287 may_trap_p_1 (const_rtx x, unsigned flags)
2288 {
2289 int i;
2290 enum rtx_code code;
2291 const char *fmt;
2292
2293 /* We make no distinction currently, but this function is part of
2294 the internal target-hooks ABI so we keep the parameter as
2295 "unsigned flags". */
2296 bool code_changed = flags != 0;
2297
2298 if (x == 0)
2299 return 0;
2300 code = GET_CODE (x);
2301 switch (code)
2302 {
2303 /* Handle these cases quickly. */
2304 CASE_CONST_ANY:
2305 case SYMBOL_REF:
2306 case LABEL_REF:
2307 case CONST:
2308 case PC:
2309 case CC0:
2310 case REG:
2311 case SCRATCH:
2312 return 0;
2313
2314 case UNSPEC:
2315 return targetm.unspec_may_trap_p (x, flags);
2316
2317 case UNSPEC_VOLATILE:
2318 case ASM_INPUT:
2319 case TRAP_IF:
2320 return 1;
2321
2322 case ASM_OPERANDS:
2323 return MEM_VOLATILE_P (x);
2324
2325 /* Memory ref can trap unless it's a static var or a stack slot. */
2326 case MEM:
2327 /* Recognize specific pattern of stack checking probes. */
2328 if (flag_stack_check
2329 && MEM_VOLATILE_P (x)
2330 && XEXP (x, 0) == stack_pointer_rtx)
2331 return 1;
2332 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2333 reference; moving it out of context such as when moving code
2334 when optimizing, might cause its address to become invalid. */
2335 code_changed
2336 || !MEM_NOTRAP_P (x))
2337 {
2338 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2339 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2340 GET_MODE (x), code_changed);
2341 }
2342
2343 return 0;
2344
2345 /* Division by a non-constant might trap. */
2346 case DIV:
2347 case MOD:
2348 case UDIV:
2349 case UMOD:
2350 if (HONOR_SNANS (GET_MODE (x)))
2351 return 1;
2352 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2353 return flag_trapping_math;
2354 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2355 return 1;
2356 break;
2357
2358 case EXPR_LIST:
2359 /* An EXPR_LIST is used to represent a function call. This
2360 certainly may trap. */
2361 return 1;
2362
2363 case GE:
2364 case GT:
2365 case LE:
2366 case LT:
2367 case LTGT:
2368 case COMPARE:
2369 /* Some floating point comparisons may trap. */
2370 if (!flag_trapping_math)
2371 break;
2372 /* ??? There is no machine independent way to check for tests that trap
2373 when COMPARE is used, though many targets do make this distinction.
2374 For instance, sparc uses CCFPE for compares which generate exceptions
2375 and CCFP for compares which do not generate exceptions. */
2376 if (HONOR_NANS (GET_MODE (x)))
2377 return 1;
2378 /* But often the compare has some CC mode, so check operand
2379 modes as well. */
2380 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2381 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2382 return 1;
2383 break;
2384
2385 case EQ:
2386 case NE:
2387 if (HONOR_SNANS (GET_MODE (x)))
2388 return 1;
2389 /* Often comparison is CC mode, so check operand modes. */
2390 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2391 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2392 return 1;
2393 break;
2394
2395 case FIX:
2396 /* Conversion of floating point might trap. */
2397 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2398 return 1;
2399 break;
2400
2401 case NEG:
2402 case ABS:
2403 case SUBREG:
2404 /* These operations don't trap even with floating point. */
2405 break;
2406
2407 default:
2408 /* Any floating arithmetic may trap. */
2409 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2410 return 1;
2411 }
2412
2413 fmt = GET_RTX_FORMAT (code);
2414 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2415 {
2416 if (fmt[i] == 'e')
2417 {
2418 if (may_trap_p_1 (XEXP (x, i), flags))
2419 return 1;
2420 }
2421 else if (fmt[i] == 'E')
2422 {
2423 int j;
2424 for (j = 0; j < XVECLEN (x, i); j++)
2425 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2426 return 1;
2427 }
2428 }
2429 return 0;
2430 }
2431
2432 /* Return nonzero if evaluating rtx X might cause a trap. */
2433
2434 int
2435 may_trap_p (const_rtx x)
2436 {
2437 return may_trap_p_1 (x, 0);
2438 }
2439
2440 /* Same as above, but additionally return nonzero if evaluating rtx X might
2441    cause a fault.  We define a fault for the purpose of this function as an
2442 erroneous execution condition that cannot be encountered during the normal
2443 execution of a valid program; the typical example is an unaligned memory
2444 access on a strict alignment machine. The compiler guarantees that it
2445 doesn't generate code that will fault from a valid program, but this
2446 guarantee doesn't mean anything for individual instructions. Consider
2447 the following example:
2448
2449 struct S { int d; union { char *cp; int *ip; }; };
2450
2451 int foo(struct S *s)
2452 {
2453 if (s->d == 1)
2454 return *s->ip;
2455 else
2456 return *s->cp;
2457 }
2458
2459 on a strict alignment machine. In a valid program, foo will never be
2460 invoked on a structure for which d is equal to 1 and the underlying
2461 unique field of the union not aligned on a 4-byte boundary, but the
2462 expression *s->ip might cause a fault if considered individually.
2463
2464 At the RTL level, potentially problematic expressions will almost always
2465 verify may_trap_p; for example, the above dereference can be emitted as
2466 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2467 However, suppose that foo is inlined in a caller that causes s->cp to
2468 point to a local character variable and guarantees that s->d is not set
2469 to 1; foo may have been effectively translated into pseudo-RTL as:
2470
2471 if ((reg:SI) == 1)
2472 (set (reg:SI) (mem:SI (%fp - 7)))
2473 else
2474 (set (reg:QI) (mem:QI (%fp - 7)))
2475
2476 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2477 memory reference to a stack slot, but it will certainly cause a fault
2478 on a strict alignment machine. */
2479
2480 int
2481 may_trap_or_fault_p (const_rtx x)
2482 {
2483 return may_trap_p_1 (x, 1);
2484 }
2485 \f
2486 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2487 i.e., an inequality. */
2488
2489 int
2490 inequality_comparisons_p (const_rtx x)
2491 {
2492 const char *fmt;
2493 int len, i;
2494 const enum rtx_code code = GET_CODE (x);
2495
2496 switch (code)
2497 {
2498 case REG:
2499 case SCRATCH:
2500 case PC:
2501 case CC0:
2502 CASE_CONST_ANY:
2503 case CONST:
2504 case LABEL_REF:
2505 case SYMBOL_REF:
2506 return 0;
2507
2508 case LT:
2509 case LTU:
2510 case GT:
2511 case GTU:
2512 case LE:
2513 case LEU:
2514 case GE:
2515 case GEU:
2516 return 1;
2517
2518 default:
2519 break;
2520 }
2521
2522 len = GET_RTX_LENGTH (code);
2523 fmt = GET_RTX_FORMAT (code);
2524
2525 for (i = 0; i < len; i++)
2526 {
2527 if (fmt[i] == 'e')
2528 {
2529 if (inequality_comparisons_p (XEXP (x, i)))
2530 return 1;
2531 }
2532 else if (fmt[i] == 'E')
2533 {
2534 int j;
2535 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2536 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2537 return 1;
2538 }
2539 }
2540
2541 return 0;
2542 }
2543 \f
2544 /* Replace any occurrence of FROM in X with TO. The function does
2545    not descend into CONST_DOUBLE when replacing.
2546
2547 Note that copying is not done so X must not be shared unless all copies
2548 are to be modified. */
2549
2550 rtx
2551 replace_rtx (rtx x, rtx from, rtx to)
2552 {
2553 int i, j;
2554 const char *fmt;
2555
2556 if (x == from)
2557 return to;
2558
2559 /* Allow this function to make replacements in EXPR_LISTs. */
2560 if (x == 0)
2561 return 0;
2562
2563 if (GET_CODE (x) == SUBREG)
2564 {
2565 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2566
2567 if (CONST_INT_P (new_rtx))
2568 {
2569 x = simplify_subreg (GET_MODE (x), new_rtx,
2570 GET_MODE (SUBREG_REG (x)),
2571 SUBREG_BYTE (x));
2572 gcc_assert (x);
2573 }
2574 else
2575 SUBREG_REG (x) = new_rtx;
2576
2577 return x;
2578 }
2579 else if (GET_CODE (x) == ZERO_EXTEND)
2580 {
2581 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2582
2583 if (CONST_INT_P (new_rtx))
2584 {
2585 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2586 new_rtx, GET_MODE (XEXP (x, 0)));
2587 gcc_assert (x);
2588 }
2589 else
2590 XEXP (x, 0) = new_rtx;
2591
2592 return x;
2593 }
2594
2595 fmt = GET_RTX_FORMAT (GET_CODE (x));
2596 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2597 {
2598 if (fmt[i] == 'e')
2599 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2600 else if (fmt[i] == 'E')
2601 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2602 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2603 }
2604
2605 return x;
2606 }
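
/* A usage sketch (editor's illustration; PAT and FROM_REG are
   hypothetical): substitute a known constant for a register throughout
   a pattern.  Since replace_rtx modifies the expression in place,
   copy_rtx is needed whenever PAT may be shared.

     pat = replace_rtx (copy_rtx (pat), from_reg, GEN_INT (7));
*/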
2607 \f
2608 /* Replace occurrences of the old label in *X with the new one.
2609 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2610
2611 int
2612 replace_label (rtx *x, void *data)
2613 {
2614 rtx l = *x;
2615 rtx old_label = ((replace_label_data *) data)->r1;
2616 rtx new_label = ((replace_label_data *) data)->r2;
2617 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2618
2619 if (l == NULL_RTX)
2620 return 0;
2621
2622 if (GET_CODE (l) == SYMBOL_REF
2623 && CONSTANT_POOL_ADDRESS_P (l))
2624 {
2625 rtx c = get_pool_constant (l);
2626 if (rtx_referenced_p (old_label, c))
2627 {
2628 rtx new_c, new_l;
2629 replace_label_data *d = (replace_label_data *) data;
2630
2631 /* Create a copy of constant C; replace the label inside
2632 but do not update LABEL_NUSES because uses in constant pool
2633 are not counted. */
2634 new_c = copy_rtx (c);
2635 d->update_label_nuses = false;
2636 for_each_rtx (&new_c, replace_label, data);
2637 d->update_label_nuses = update_label_nuses;
2638
2639 /* Add the new constant NEW_C to constant pool and replace
2640 the old reference to constant by new reference. */
2641 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2642 *x = replace_rtx (l, l, new_l);
2643 }
2644 return 0;
2645 }
2646
2647 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2648 field. This is not handled by for_each_rtx because it doesn't
2649 handle unprinted ('0') fields. */
2650 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2651 JUMP_LABEL (l) = new_label;
2652
2653 if ((GET_CODE (l) == LABEL_REF
2654 || GET_CODE (l) == INSN_LIST)
2655 && XEXP (l, 0) == old_label)
2656 {
2657 XEXP (l, 0) = new_label;
2658 if (update_label_nuses)
2659 {
2660 ++LABEL_NUSES (new_label);
2661 --LABEL_NUSES (old_label);
2662 }
2663 return 0;
2664 }
2665
2666 return 0;
2667 }
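
/* A usage sketch (editor's illustration): redirect every reference to
   OLD_LABEL within an insn to NEW_LABEL, keeping LABEL_NUSES current.

     replace_label_data data;
     data.r1 = old_label;
     data.r2 = new_label;
     data.update_label_nuses = true;
     for_each_rtx (&PATTERN (insn), replace_label, &data);
*/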
2668
2669 /* Return nonzero when *BODY equals X or when X is directly referenced by
2670    *BODY, in which case FOR_EACH_RTX stops traversing and returns nonzero
2671    too; otherwise FOR_EACH_RTX continues traversing *BODY.  */
2672
2673 static int
2674 rtx_referenced_p_1 (rtx *body, void *x)
2675 {
2676 rtx y = (rtx) x;
2677
2678 if (*body == NULL_RTX)
2679 return y == NULL_RTX;
2680
2681 /* Return true if a label_ref *BODY refers to label Y. */
2682 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2683 return XEXP (*body, 0) == y;
2684
2685 /* If *BODY is a reference to pool constant traverse the constant. */
2686 if (GET_CODE (*body) == SYMBOL_REF
2687 && CONSTANT_POOL_ADDRESS_P (*body))
2688 return rtx_referenced_p (y, get_pool_constant (*body));
2689
2690 /* By default, compare the RTL expressions. */
2691 return rtx_equal_p (*body, y);
2692 }
2693
2694 /* Return true if X is referenced in BODY. */
2695
2696 int
2697 rtx_referenced_p (rtx x, rtx body)
2698 {
2699 return for_each_rtx (&body, rtx_referenced_p_1, x);
2700 }
2701
2702 /* If INSN is a tablejump, return true and store the label (which precedes
2703    the jump table) in *LABELP and the jump table in *TABLEP; both may be NULL.  */
2704
2705 bool
2706 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2707 {
2708 rtx label, table;
2709
2710 if (!JUMP_P (insn))
2711 return false;
2712
2713 label = JUMP_LABEL (insn);
2714 if (label != NULL_RTX && !ANY_RETURN_P (label)
2715 && (table = next_active_insn (label)) != NULL_RTX
2716 && JUMP_TABLE_DATA_P (table))
2717 {
2718 gcc_assert (table == NEXT_INSN (label));
2719 if (labelp)
2720 *labelp = label;
2721 if (tablep)
2722 *tablep = table;
2723 return true;
2724 }
2725 return false;
2726 }
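
/* A usage sketch (editor's illustration; VISIT is a hypothetical hook):
   walk the target labels of a tablejump, mirroring the idiom used by
   label_is_jump_target_p below.

     rtx label, table;
     if (tablejump_p (insn, &label, &table))
       {
         rtvec vec = XVEC (PATTERN (table),
                           GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC);
         int i;
         for (i = 0; i < GET_NUM_ELEM (vec); i++)
           visit (XEXP (RTVEC_ELT (vec, i), 0));
       }
*/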
2727
2728 /* A subroutine of computed_jump_p: return 1 if X contains a REG or MEM or
2729    constant that is not in the constant pool and not in the condition
2730    of an IF_THEN_ELSE.  */
2731
2732 static int
2733 computed_jump_p_1 (const_rtx x)
2734 {
2735 const enum rtx_code code = GET_CODE (x);
2736 int i, j;
2737 const char *fmt;
2738
2739 switch (code)
2740 {
2741 case LABEL_REF:
2742 case PC:
2743 return 0;
2744
2745 case CONST:
2746 CASE_CONST_ANY:
2747 case SYMBOL_REF:
2748 case REG:
2749 return 1;
2750
2751 case MEM:
2752 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2753 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2754
2755 case IF_THEN_ELSE:
2756 return (computed_jump_p_1 (XEXP (x, 1))
2757 || computed_jump_p_1 (XEXP (x, 2)));
2758
2759 default:
2760 break;
2761 }
2762
2763 fmt = GET_RTX_FORMAT (code);
2764 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2765 {
2766 if (fmt[i] == 'e'
2767 && computed_jump_p_1 (XEXP (x, i)))
2768 return 1;
2769
2770 else if (fmt[i] == 'E')
2771 for (j = 0; j < XVECLEN (x, i); j++)
2772 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2773 return 1;
2774 }
2775
2776 return 0;
2777 }
2778
2779 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2780
2781 Tablejumps and casesi insns are not considered indirect jumps;
2782 we can recognize them by a (use (label_ref)). */
2783
2784 int
2785 computed_jump_p (const_rtx insn)
2786 {
2787 int i;
2788 if (JUMP_P (insn))
2789 {
2790 rtx pat = PATTERN (insn);
2791
2792 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2793 if (JUMP_LABEL (insn) != NULL)
2794 return 0;
2795
2796 if (GET_CODE (pat) == PARALLEL)
2797 {
2798 int len = XVECLEN (pat, 0);
2799 int has_use_labelref = 0;
2800
2801 for (i = len - 1; i >= 0; i--)
2802 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2803 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2804 == LABEL_REF))
2805 has_use_labelref = 1;
2806
2807 if (! has_use_labelref)
2808 for (i = len - 1; i >= 0; i--)
2809 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2810 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2811 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2812 return 1;
2813 }
2814 else if (GET_CODE (pat) == SET
2815 && SET_DEST (pat) == pc_rtx
2816 && computed_jump_p_1 (SET_SRC (pat)))
2817 return 1;
2818 }
2819 return 0;
2820 }
2821
2822 /* Optimized inner loop of for_each_rtx, avoiding useless recursive calls.
2823    Processes the subexpressions of EXP, starting at operand N, and passes them to F.  */
2824 static int
2825 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2826 {
2827 int result, i, j;
2828 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2829 rtx *x;
2830
2831 for (; format[n] != '\0'; n++)
2832 {
2833 switch (format[n])
2834 {
2835 case 'e':
2836 /* Call F on X. */
2837 x = &XEXP (exp, n);
2838 result = (*f) (x, data);
2839 if (result == -1)
2840 /* Do not traverse sub-expressions. */
2841 continue;
2842 else if (result != 0)
2843 /* Stop the traversal. */
2844 return result;
2845
2846 if (*x == NULL_RTX)
2847 /* There are no sub-expressions. */
2848 continue;
2849
2850 i = non_rtx_starting_operands[GET_CODE (*x)];
2851 if (i >= 0)
2852 {
2853 result = for_each_rtx_1 (*x, i, f, data);
2854 if (result != 0)
2855 return result;
2856 }
2857 break;
2858
2859 case 'V':
2860 case 'E':
2861 if (XVEC (exp, n) == 0)
2862 continue;
2863 for (j = 0; j < XVECLEN (exp, n); ++j)
2864 {
2865 /* Call F on X. */
2866 x = &XVECEXP (exp, n, j);
2867 result = (*f) (x, data);
2868 if (result == -1)
2869 /* Do not traverse sub-expressions. */
2870 continue;
2871 else if (result != 0)
2872 /* Stop the traversal. */
2873 return result;
2874
2875 if (*x == NULL_RTX)
2876 /* There are no sub-expressions. */
2877 continue;
2878
2879 i = non_rtx_starting_operands[GET_CODE (*x)];
2880 if (i >= 0)
2881 {
2882 result = for_each_rtx_1 (*x, i, f, data);
2883 if (result != 0)
2884 return result;
2885 }
2886 }
2887 break;
2888
2889 default:
2890 /* Nothing to do. */
2891 break;
2892 }
2893 }
2894
2895 return 0;
2896 }
2897
2898 /* Traverse X via depth-first search, calling F for each
2899 sub-expression (including X itself). F is also passed the DATA.
2900 If F returns -1, do not traverse sub-expressions, but continue
2901 traversing the rest of the tree. If F ever returns any other
2902 nonzero value, stop the traversal, and return the value returned
2903 by F. Otherwise, return 0. This function does not traverse inside
2904 tree structure that contains RTX_EXPRs, or into sub-expressions
2905 whose format code is `0' since it is not known whether or not those
2906 codes are actually RTL.
2907
2908 This routine is very general, and could (should?) be used to
2909 implement many of the other routines in this file. */
2910
2911 int
2912 for_each_rtx (rtx *x, rtx_function f, void *data)
2913 {
2914 int result;
2915 int i;
2916
2917 /* Call F on X. */
2918 result = (*f) (x, data);
2919 if (result == -1)
2920 /* Do not traverse sub-expressions. */
2921 return 0;
2922 else if (result != 0)
2923 /* Stop the traversal. */
2924 return result;
2925
2926 if (*x == NULL_RTX)
2927 /* There are no sub-expressions. */
2928 return 0;
2929
2930 i = non_rtx_starting_operands[GET_CODE (*x)];
2931 if (i < 0)
2932 return 0;
2933
2934 return for_each_rtx_1 (*x, i, f, data);
2935 }
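
/* A callback sketch (editor's illustration): count the MEMs in a
   pattern, following the return-value protocol described above
   (-1 skips subexpressions, any other nonzero value stops the walk).

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (*x != NULL_RTX && MEM_P (*x))
         ++*(int *) data;
       return 0;
     }

     int n_mems = 0;
     for_each_rtx (&PATTERN (insn), count_mems_1, &n_mems);
*/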
2936
2937 \f
2938
2939 /* Data structure that holds the internal state communicated between
2940 for_each_inc_dec, for_each_inc_dec_find_mem and
2941 for_each_inc_dec_find_inc_dec. */
2942
2943 struct for_each_inc_dec_ops {
2944 /* The function to be called for each autoinc operation found. */
2945 for_each_inc_dec_fn fn;
2946 /* The opaque argument to be passed to it. */
2947 void *arg;
2948 /* The MEM we're visiting, if any. */
2949 rtx mem;
2950 };
2951
2952 static int for_each_inc_dec_find_mem (rtx *r, void *d);
2953
2954 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
2955 operands of the equivalent add insn and pass the result to the
2956 operator specified by *D. */
2957
2958 static int
2959 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
2960 {
2961 rtx x = *r;
2962 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
2963
2964 switch (GET_CODE (x))
2965 {
2966 case PRE_INC:
2967 case POST_INC:
2968 {
2969 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2970 rtx r1 = XEXP (x, 0);
2971 rtx c = gen_int_mode (size, GET_MODE (r1));
2972 return data->fn (data->mem, x, r1, r1, c, data->arg);
2973 }
2974
2975 case PRE_DEC:
2976 case POST_DEC:
2977 {
2978 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2979 rtx r1 = XEXP (x, 0);
2980 rtx c = gen_int_mode (-size, GET_MODE (r1));
2981 return data->fn (data->mem, x, r1, r1, c, data->arg);
2982 }
2983
2984 case PRE_MODIFY:
2985 case POST_MODIFY:
2986 {
2987 rtx r1 = XEXP (x, 0);
2988 rtx add = XEXP (x, 1);
2989 return data->fn (data->mem, x, r1, add, NULL, data->arg);
2990 }
2991
2992 case MEM:
2993 {
2994 rtx save = data->mem;
2995 int ret = for_each_inc_dec_find_mem (r, d);
2996 data->mem = save;
2997 return ret;
2998 }
2999
3000 default:
3001 return 0;
3002 }
3003 }
3004
3005 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3006 address, extract the operands of the equivalent add insn and pass
3007 the result to the operator specified by *D. */
3008
3009 static int
3010 for_each_inc_dec_find_mem (rtx *r, void *d)
3011 {
3012 rtx x = *r;
3013 if (x != NULL_RTX && MEM_P (x))
3014 {
3015 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3016 int result;
3017
3018 data->mem = x;
3019
3020 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3021 data);
3022 if (result)
3023 return result;
3024
3025 return -1;
3026 }
3027 return 0;
3028 }
3029
3030 /* Traverse *X looking for MEMs, and for autoinc operations within
3031 them. For each such autoinc operation found, call FN, passing it
3032 the innermost enclosing MEM, the operation itself, the RTX modified
3033 by the operation, two RTXs (the second may be NULL) that, once
3034 added, represent the value to be held by the modified RTX
3035 afterwards, and ARG. FN is to return -1 to skip looking for other
3036 autoinc operations within the visited operation, 0 to continue the
3037 traversal, or any other value to have it returned to the caller of
3038 for_each_inc_dec. */
3039
3040 int
3041 for_each_inc_dec (rtx *x,
3042 for_each_inc_dec_fn fn,
3043 void *arg)
3044 {
3045 struct for_each_inc_dec_ops data;
3046
3047 data.fn = fn;
3048 data.arg = arg;
3049 data.mem = NULL;
3050
3051 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3052 }
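
/* A callback sketch (editor's illustration): FN receives the enclosing
   MEM, the autoinc rtx itself, the register being modified, and the
   SRC/SRCOFF pair whose sum gives the new value (SRCOFF is NULL for
   {PRE,POST}_MODIFY, where SRC already carries the whole expression).
   This one merely counts the autoinc operations in an insn.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *arg)
     {
       ++*(int *) arg;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (&PATTERN (insn), count_autoinc, &n);
*/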
3053
3054 \f
3055 /* Searches X for any reference to REGNO, returning the rtx of the
3056 reference found if any. Otherwise, returns NULL_RTX. */
3057
3058 rtx
3059 regno_use_in (unsigned int regno, rtx x)
3060 {
3061 const char *fmt;
3062 int i, j;
3063 rtx tem;
3064
3065 if (REG_P (x) && REGNO (x) == regno)
3066 return x;
3067
3068 fmt = GET_RTX_FORMAT (GET_CODE (x));
3069 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3070 {
3071 if (fmt[i] == 'e')
3072 {
3073 if ((tem = regno_use_in (regno, XEXP (x, i))))
3074 return tem;
3075 }
3076 else if (fmt[i] == 'E')
3077 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3078 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3079 return tem;
3080 }
3081
3082 return NULL_RTX;
3083 }
3084
3085 /* Return a value indicating whether OP, an operand of a commutative
3086 operation, is preferred as the first or second operand. The higher
3087 the value, the stronger the preference for being the first operand.
3088    We use negative values to indicate a preference for the second operand
3089    and positive values for the first operand.  */
3090
3091 int
3092 commutative_operand_precedence (rtx op)
3093 {
3094 enum rtx_code code = GET_CODE (op);
3095
3096   /* Constants always come second.  Prefer "nice" constants.  */
3097 if (code == CONST_INT)
3098 return -8;
3099 if (code == CONST_DOUBLE)
3100 return -7;
3101 if (code == CONST_FIXED)
3102 return -7;
3103 op = avoid_constant_pool_reference (op);
3104 code = GET_CODE (op);
3105
3106 switch (GET_RTX_CLASS (code))
3107 {
3108 case RTX_CONST_OBJ:
3109 if (code == CONST_INT)
3110 return -6;
3111 if (code == CONST_DOUBLE)
3112 return -5;
3113 if (code == CONST_FIXED)
3114 return -5;
3115 return -4;
3116
3117 case RTX_EXTRA:
3118 /* SUBREGs of objects should come second. */
3119 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3120 return -3;
3121 return 0;
3122
3123 case RTX_OBJ:
3124       /* Complex expressions should be first, so decrease the priority of
3125          objects.  Prefer pointer objects over non-pointer objects.  */
3126 if ((REG_P (op) && REG_POINTER (op))
3127 || (MEM_P (op) && MEM_POINTER (op)))
3128 return -1;
3129 return -2;
3130
3131 case RTX_COMM_ARITH:
3132 /* Prefer operands that are themselves commutative to be first.
3133 This helps to make things linear. In particular,
3134 (and (and (reg) (reg)) (not (reg))) is canonical. */
3135 return 4;
3136
3137 case RTX_BIN_ARITH:
3138 /* If only one operand is a binary expression, it will be the first
3139 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3140 is canonical, although it will usually be further simplified. */
3141 return 2;
3142
3143 case RTX_UNARY:
3144 /* Then prefer NEG and NOT. */
3145 if (code == NEG || code == NOT)
3146 return 1;
3147
3148 default:
3149 return 0;
3150 }
3151 }
3152
3153 /* Return 1 iff it is necessary to swap the operands of a commutative
3154    operation in order to canonicalize the expression.  */
3155
3156 bool
3157 swap_commutative_operands_p (rtx x, rtx y)
3158 {
3159 return (commutative_operand_precedence (x)
3160 < commutative_operand_precedence (y));
3161 }
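
/* A canonicalization sketch (editor's illustration): place the operand
   with the higher precedence first, the usual idiom when building
   commutative expressions.

     if (swap_commutative_operands_p (op0, op1))
       {
         rtx tem = op0;
         op0 = op1;
         op1 = tem;
       }
*/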
3162
3163 /* Return 1 if X is an autoincrement side effect and the register is
3164 not the stack pointer. */
3165 int
3166 auto_inc_p (const_rtx x)
3167 {
3168 switch (GET_CODE (x))
3169 {
3170 case PRE_INC:
3171 case POST_INC:
3172 case PRE_DEC:
3173 case POST_DEC:
3174 case PRE_MODIFY:
3175 case POST_MODIFY:
3176 /* There are no REG_INC notes for SP. */
3177 if (XEXP (x, 0) != stack_pointer_rtx)
3178 return 1;
3179 default:
3180 break;
3181 }
3182 return 0;
3183 }
3184
3185 /* Return nonzero if IN contains a piece of rtl whose address is LOC.  */
3186 int
3187 loc_mentioned_in_p (rtx *loc, const_rtx in)
3188 {
3189 enum rtx_code code;
3190 const char *fmt;
3191 int i, j;
3192
3193 if (!in)
3194 return 0;
3195
3196 code = GET_CODE (in);
3197 fmt = GET_RTX_FORMAT (code);
3198 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3199 {
3200 if (fmt[i] == 'e')
3201 {
3202 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3203 return 1;
3204 }
3205 else if (fmt[i] == 'E')
3206 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3207 if (loc == &XVECEXP (in, i, j)
3208 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3209 return 1;
3210 }
3211 return 0;
3212 }
3213
3214 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3215 and SUBREG_BYTE, return the bit offset where the subreg begins
3216 (counting from the least significant bit of the operand). */
3217
3218 unsigned int
3219 subreg_lsb_1 (enum machine_mode outer_mode,
3220 enum machine_mode inner_mode,
3221 unsigned int subreg_byte)
3222 {
3223 unsigned int bitpos;
3224 unsigned int byte;
3225 unsigned int word;
3226
3227 /* A paradoxical subreg begins at bit position 0. */
3228 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3229 return 0;
3230
3231 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3232 /* If the subreg crosses a word boundary ensure that
3233 it also begins and ends on a word boundary. */
3234 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3235 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3236 && (subreg_byte % UNITS_PER_WORD
3237 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3238
3239 if (WORDS_BIG_ENDIAN)
3240 word = (GET_MODE_SIZE (inner_mode)
3241 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3242 else
3243 word = subreg_byte / UNITS_PER_WORD;
3244 bitpos = word * BITS_PER_WORD;
3245
3246 if (BYTES_BIG_ENDIAN)
3247 byte = (GET_MODE_SIZE (inner_mode)
3248 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3249 else
3250 byte = subreg_byte % UNITS_PER_WORD;
3251 bitpos += byte * BITS_PER_UNIT;
3252
3253 return bitpos;
3254 }
3255
3256 /* Given a subreg X, return the bit offset where the subreg begins
3257 (counting from the least significant bit of the reg). */
3258
3259 unsigned int
3260 subreg_lsb (const_rtx x)
3261 {
3262 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3263 SUBREG_BYTE (x));
3264 }
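
/* A worked example (editor's illustration): on a little-endian target
   (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN == 0) with 32-bit words,
   (subreg:SI (reg:DI) 4) selects the high half of the DImode value:
   word = 4 / UNITS_PER_WORD = 1 and byte = 4 % UNITS_PER_WORD = 0,
   so subreg_lsb returns 1 * BITS_PER_WORD + 0 = 32.  */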
3265
3266 /* Fill in information about a subreg of a hard register.
3267 xregno - A regno of an inner hard subreg_reg (or what will become one).
3268 xmode - The mode of xregno.
3269 offset - The byte offset.
3270 ymode - The mode of a top level SUBREG (or what may become one).
3271 info - Pointer to structure to fill in. */
3272 void
3273 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3274 unsigned int offset, enum machine_mode ymode,
3275 struct subreg_info *info)
3276 {
3277 int nregs_xmode, nregs_ymode;
3278 int mode_multiple, nregs_multiple;
3279 int offset_adj, y_offset, y_offset_adj;
3280 int regsize_xmode, regsize_ymode;
3281 bool rknown;
3282
3283 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3284
3285 rknown = false;
3286
3287 /* If there are holes in a non-scalar mode in registers, we expect
3288 that it is made up of its units concatenated together. */
3289 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3290 {
3291 enum machine_mode xmode_unit;
3292
3293 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3294 if (GET_MODE_INNER (xmode) == VOIDmode)
3295 xmode_unit = xmode;
3296 else
3297 xmode_unit = GET_MODE_INNER (xmode);
3298 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3299 gcc_assert (nregs_xmode
3300 == (GET_MODE_NUNITS (xmode)
3301 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3302 gcc_assert (hard_regno_nregs[xregno][xmode]
3303 == (hard_regno_nregs[xregno][xmode_unit]
3304 * GET_MODE_NUNITS (xmode)));
3305
3306 /* You can only ask for a SUBREG of a value with holes in the middle
3307 if you don't cross the holes. (Such a SUBREG should be done by
3308 picking a different register class, or doing it in memory if
3309 necessary.) An example of a value with holes is XCmode on 32-bit
3310 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3311 3 for each part, but in memory it's two 128-bit parts.
3312 Padding is assumed to be at the end (not necessarily the 'high part')
3313 of each unit. */
3314 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3315 < GET_MODE_NUNITS (xmode))
3316 && (offset / GET_MODE_SIZE (xmode_unit)
3317 != ((offset + GET_MODE_SIZE (ymode) - 1)
3318 / GET_MODE_SIZE (xmode_unit))))
3319 {
3320 info->representable_p = false;
3321 rknown = true;
3322 }
3323 }
3324 else
3325 nregs_xmode = hard_regno_nregs[xregno][xmode];
3326
3327 nregs_ymode = hard_regno_nregs[xregno][ymode];
3328
3329 /* Paradoxical subregs are otherwise valid. */
3330 if (!rknown
3331 && offset == 0
3332 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3333 {
3334 info->representable_p = true;
3335 /* If this is a big endian paradoxical subreg, which uses more
3336 actual hard registers than the original register, we must
3337 return a negative offset so that we find the proper highpart
3338 of the register. */
3339 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3340 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3341 info->offset = nregs_xmode - nregs_ymode;
3342 else
3343 info->offset = 0;
3344 info->nregs = nregs_ymode;
3345 return;
3346 }
3347
3348 /* If registers store different numbers of bits in the different
3349 modes, we cannot generally form this subreg. */
3350 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3351 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3352 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3353 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3354 {
3355 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3356 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3357 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3358 {
3359 info->representable_p = false;
3360 info->nregs
3361 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3362 info->offset = offset / regsize_xmode;
3363 return;
3364 }
3365 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3366 {
3367 info->representable_p = false;
3368 info->nregs
3369 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3370 info->offset = offset / regsize_xmode;
3371 return;
3372 }
3373 }
3374
3375 /* Lowpart subregs are otherwise valid. */
3376 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3377 {
3378 info->representable_p = true;
3379 rknown = true;
3380
3381 if (offset == 0 || nregs_xmode == nregs_ymode)
3382 {
3383 info->offset = 0;
3384 info->nregs = nregs_ymode;
3385 return;
3386 }
3387 }
3388
3389 /* This should always pass, otherwise we don't know how to verify
3390 the constraint. These conditions may be relaxed but
3391 subreg_regno_offset would need to be redesigned. */
3392 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3393 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3394
3395 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3396 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3397 {
3398 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3399 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3400 HOST_WIDE_INT off_low = offset & (ysize - 1);
3401 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3402 offset = (xsize - ysize - off_high) | off_low;
3403 }
3404 /* The XMODE value can be seen as a vector of NREGS_XMODE
3405      values.  The subreg must represent a lowpart of a given field.
3406 Compute what field it is. */
3407 offset_adj = offset;
3408 offset_adj -= subreg_lowpart_offset (ymode,
3409 mode_for_size (GET_MODE_BITSIZE (xmode)
3410 / nregs_xmode,
3411 MODE_INT, 0));
3412
3413 /* Size of ymode must not be greater than the size of xmode. */
3414 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3415 gcc_assert (mode_multiple != 0);
3416
3417 y_offset = offset / GET_MODE_SIZE (ymode);
3418 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3419 nregs_multiple = nregs_xmode / nregs_ymode;
3420
3421 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3422 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3423
3424 if (!rknown)
3425 {
3426 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3427 rknown = true;
3428 }
3429 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3430 info->nregs = nregs_ymode;
3431 }
3432
3433 /* This function returns the regno offset of a subreg expression.
3434 xregno - A regno of an inner hard subreg_reg (or what will become one).
3435 xmode - The mode of xregno.
3436 offset - The byte offset.
3437 ymode - The mode of a top level SUBREG (or what may become one).
3438 RETURN - The regno offset which would be used. */
3439 unsigned int
3440 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3441 unsigned int offset, enum machine_mode ymode)
3442 {
3443 struct subreg_info info;
3444 subreg_get_info (xregno, xmode, offset, ymode, &info);
3445 return info.offset;
3446 }
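
/* A worked example (editor's illustration): if hard register XREGNO
   holds a DImode value in two 32-bit registers on a little-endian
   target, then (subreg:SI (reg:DI XREGNO) 4) refers to register
   XREGNO + 1, so subreg_regno_offset returns 1; with SUBREG_BYTE 0
   it returns 0.  */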
3447
3448 /* This function returns true when the offset is representable via
3449 subreg_offset in the given regno.
3450 xregno - A regno of an inner hard subreg_reg (or what will become one).
3451 xmode - The mode of xregno.
3452 offset - The byte offset.
3453 ymode - The mode of a top level SUBREG (or what may become one).
3454 RETURN - Whether the offset is representable. */
3455 bool
3456 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3457 unsigned int offset, enum machine_mode ymode)
3458 {
3459 struct subreg_info info;
3460 subreg_get_info (xregno, xmode, offset, ymode, &info);
3461 return info.representable_p;
3462 }
3463
3464 /* Return the number of a YMODE register to which
3465
3466 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3467
3468 can be simplified. Return -1 if the subreg can't be simplified.
3469
3470 XREGNO is a hard register number. */
3471
3472 int
3473 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3474 unsigned int offset, enum machine_mode ymode)
3475 {
3476 struct subreg_info info;
3477 unsigned int yregno;
3478
3479 #ifdef CANNOT_CHANGE_MODE_CLASS
3480 /* Give the backend a chance to disallow the mode change. */
3481 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3482 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3483 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3484 /* We can use mode change in LRA for some transformations. */
3485 && ! lra_in_progress)
3486 return -1;
3487 #endif
3488
3489 /* We shouldn't simplify stack-related registers. */
3490 if ((!reload_completed || frame_pointer_needed)
3491 && xregno == FRAME_POINTER_REGNUM)
3492 return -1;
3493
3494 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3495 && xregno == ARG_POINTER_REGNUM)
3496 return -1;
3497
3498 if (xregno == STACK_POINTER_REGNUM
3499 /* We should convert hard stack register in LRA if it is
3500 possible. */
3501 && ! lra_in_progress)
3502 return -1;
3503
3504 /* Try to get the register offset. */
3505 subreg_get_info (xregno, xmode, offset, ymode, &info);
3506 if (!info.representable_p)
3507 return -1;
3508
3509 /* Make sure that the offsetted register value is in range. */
3510 yregno = xregno + info.offset;
3511 if (!HARD_REGISTER_NUM_P (yregno))
3512 return -1;
3513
3514 /* See whether (reg:YMODE YREGNO) is valid.
3515
3516 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3517 This is a kludge to work around how complex FP arguments are passed
3518 on IA-64 and should be fixed. See PR target/49226. */
3519 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3520 && HARD_REGNO_MODE_OK (xregno, xmode))
3521 return -1;
3522
3523 return (int) yregno;
3524 }
3525
3526 /* Return the final regno that a subreg expression refers to. */
3527 unsigned int
3528 subreg_regno (const_rtx x)
3529 {
3530 unsigned int ret;
3531 rtx subreg = SUBREG_REG (x);
3532 int regno = REGNO (subreg);
3533
3534 ret = regno + subreg_regno_offset (regno,
3535 GET_MODE (subreg),
3536 SUBREG_BYTE (x),
3537 GET_MODE (x));
3538 return ret;
3539
3540 }
3541
3542 /* Return the number of registers that a subreg expression refers
3543 to. */
3544 unsigned int
3545 subreg_nregs (const_rtx x)
3546 {
3547 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3548 }
3549
3550 /* Return the number of registers that a subreg expression X, whose inner
3551    register has number REGNO, refers to.  This is a variant of subreg_nregs
3552    that allows the regno to be passed in.  */
3553
3554 unsigned int
3555 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3556 {
3557 struct subreg_info info;
3558 rtx subreg = SUBREG_REG (x);
3559
3560 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3561 &info);
3562 return info.nregs;
3563 }
3564
3565
3566 struct parms_set_data
3567 {
3568 int nregs;
3569 HARD_REG_SET regs;
3570 };
3571
3572 /* Helper function for noticing stores to parameter registers. */
3573 static void
3574 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3575 {
3576 struct parms_set_data *const d = (struct parms_set_data *) data;
3577 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3578 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3579 {
3580 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3581 d->nregs--;
3582 }
3583 }
3584
3585 /* Look backward for the first parameter to be loaded.
3586 Note that loads of all parameters will not necessarily be
3587 found if CSE has eliminated some of them (e.g., an argument
3588 to the outer function is passed down as a parameter).
3589 Do not skip BOUNDARY. */
3590 rtx
3591 find_first_parameter_load (rtx call_insn, rtx boundary)
3592 {
3593 struct parms_set_data parm;
3594 rtx p, before, first_set;
3595
3596 /* Since different machines initialize their parameter registers
3597 in different orders, assume nothing. Collect the set of all
3598 parameter registers. */
3599 CLEAR_HARD_REG_SET (parm.regs);
3600 parm.nregs = 0;
3601 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3602 if (GET_CODE (XEXP (p, 0)) == USE
3603 && REG_P (XEXP (XEXP (p, 0), 0)))
3604 {
3605 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3606
3607 /* We only care about registers which can hold function
3608 arguments. */
3609 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3610 continue;
3611
3612 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3613 parm.nregs++;
3614 }
3615 before = call_insn;
3616 first_set = call_insn;
3617
3618 /* Search backward for the first set of a register in this set. */
3619 while (parm.nregs && before != boundary)
3620 {
3621 before = PREV_INSN (before);
3622
3623 /* It is possible that some loads got CSEed from one call to
3624 another. Stop in that case. */
3625 if (CALL_P (before))
3626 break;
3627
3628       /* Our caller must either ensure that we will find all sets
3629          (in case code has not been optimized yet), or take care of
3630          possible labels by setting BOUNDARY to the preceding
3631          CODE_LABEL.  */
3632 if (LABEL_P (before))
3633 {
3634 gcc_assert (before == boundary);
3635 break;
3636 }
3637
3638 if (INSN_P (before))
3639 {
3640 int nregs_old = parm.nregs;
3641 note_stores (PATTERN (before), parms_set, &parm);
3642 /* If we found something that did not set a parameter reg,
3643 we're done. Do not keep going, as that might result
3644 in hoisting an insn before the setting of a pseudo
3645 that is used by the hoisted insn. */
3646 if (nregs_old != parm.nregs)
3647 first_set = before;
3648 else
3649 break;
3650 }
3651 }
3652 return first_set;
3653 }
3654
3655 /* Return true if we should avoid inserting code between INSN and preceding
3656 call instruction. */
3657
3658 bool
3659 keep_with_call_p (const_rtx insn)
3660 {
3661 rtx set;
3662
3663 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3664 {
3665 if (REG_P (SET_DEST (set))
3666 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3667 && fixed_regs[REGNO (SET_DEST (set))]
3668 && general_operand (SET_SRC (set), VOIDmode))
3669 return true;
3670 if (REG_P (SET_SRC (set))
3671 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3672 && REG_P (SET_DEST (set))
3673 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3674 return true;
3675 /* There may be a stack pop just after the call and before the store
3676 of the return register. Search for the actual store when deciding
3677 if we can break or not. */
3678 if (SET_DEST (set) == stack_pointer_rtx)
3679 {
3680 /* This CONST_CAST is okay because next_nonnote_insn just
3681 returns its argument and we assign it to a const_rtx
3682 variable. */
3683 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3684 if (i2 && keep_with_call_p (i2))
3685 return true;
3686 }
3687 }
3688 return false;
3689 }
3690
3691 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3692 to non-complex jumps. That is, direct unconditional, conditional,
3693 and tablejumps, but not computed jumps or returns. It also does
3694 not apply to the fallthru case of a conditional jump. */
3695
3696 bool
3697 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3698 {
3699 rtx tmp = JUMP_LABEL (jump_insn);
3700
3701 if (label == tmp)
3702 return true;
3703
3704 if (tablejump_p (jump_insn, NULL, &tmp))
3705 {
3706 rtvec vec = XVEC (PATTERN (tmp),
3707 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3708 int i, veclen = GET_NUM_ELEM (vec);
3709
3710 for (i = 0; i < veclen; ++i)
3711 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3712 return true;
3713 }
3714
3715 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3716 return true;
3717
3718 return false;
3719 }
3720
3721 \f
3722 /* Return an estimate of the cost of computing rtx X.
3723 One use is in cse, to decide which expression to keep in the hash table.
3724 Another is in rtl generation, to pick the cheapest way to multiply.
3725 Other uses like the latter are expected in the future.
3726
3727 X appears as operand OPNO in an expression with code OUTER_CODE.
3728 SPEED specifies whether costs optimized for speed or size should
3729 be returned. */
3730
3731 int
3732 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3733 {
3734 int i, j;
3735 enum rtx_code code;
3736 const char *fmt;
3737 int total;
3738 int factor;
3739
3740 if (x == 0)
3741 return 0;
3742
3743 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3744 many insns, taking N times as long. */
3745 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3746 if (factor == 0)
3747 factor = 1;
3748
3749 /* Compute the default costs of certain things.
3750 Note that targetm.rtx_costs can override the defaults. */
3751
3752 code = GET_CODE (x);
3753 switch (code)
3754 {
3755 case MULT:
3756 /* Multiplication has time-complexity O(N*N), where N is the
3757 number of units (translated from digits) when using
3758 schoolbook long multiplication. */
3759 total = factor * factor * COSTS_N_INSNS (5);
3760 break;
3761 case DIV:
3762 case UDIV:
3763 case MOD:
3764 case UMOD:
3765 /* Similarly, complexity for schoolbook long division. */
3766 total = factor * factor * COSTS_N_INSNS (7);
3767 break;
3768 case USE:
3769 /* Used in combine.c as a marker. */
3770 total = 0;
3771 break;
3772 case SET:
3773 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3774 the mode for the factor. */
3775 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3776 if (factor == 0)
3777 factor = 1;
3778       /* Fall through.  */
3779 default:
3780 total = factor * COSTS_N_INSNS (1);
3781 }
3782
3783 switch (code)
3784 {
3785 case REG:
3786 return 0;
3787
3788 case SUBREG:
3789 total = 0;
3790 /* If we can't tie these modes, make this expensive. The larger
3791 the mode, the more expensive it is. */
3792 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3793 return COSTS_N_INSNS (2 + factor);
3794 break;
3795
3796 default:
3797 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3798 return total;
3799 break;
3800 }
3801
3802 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3803 which is already in total. */
3804
3805 fmt = GET_RTX_FORMAT (code);
3806 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3807 if (fmt[i] == 'e')
3808 total += rtx_cost (XEXP (x, i), code, i, speed);
3809 else if (fmt[i] == 'E')
3810 for (j = 0; j < XVECLEN (x, i); j++)
3811 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3812
3813 return total;
3814 }
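
/* A usage sketch (editor's illustration): query the cost of a SET's
   source, the idiom behind the set_src_cost-style wrappers in rtl.h.

     bool speed = optimize_insn_for_speed_p ();
     int cost = rtx_cost (SET_SRC (set), SET, 1, speed);
*/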
3815
3816 /* Fill in the structure C with information about both speed and size rtx
3817 costs for X, which is operand OPNO in an expression with code OUTER. */
3818
3819 void
3820 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3821 struct full_rtx_costs *c)
3822 {
3823 c->speed = rtx_cost (x, outer, opno, true);
3824 c->size = rtx_cost (x, outer, opno, false);
3825 }
3826
3827 \f
3828 /* Return the cost of address expression X.
3829    Expect that X is a properly formed address reference.
3830
3831    The SPEED parameter specifies whether costs optimized for speed or for
3832    size should be returned.  */
3833
3834 int
3835 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3836 {
3837   /* We may be asked for the cost of various unusual addresses, such as the
3838      operands of push instructions.  It is not worthwhile to complicate the
3839      target hook with such cases.  */
3840
3841 if (!memory_address_addr_space_p (mode, x, as))
3842 return 1000;
3843
3844 return targetm.address_cost (x, mode, as, speed);
3845 }
3846
3847 /* If the target doesn't override, compute the cost as with arithmetic. */
3848
3849 int
3850 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3851 {
3852 return rtx_cost (x, MEM, 0, speed);
3853 }
3854 \f
3855
3856 unsigned HOST_WIDE_INT
3857 nonzero_bits (const_rtx x, enum machine_mode mode)
3858 {
3859 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3860 }
3861
3862 unsigned int
3863 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3864 {
3865 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3866 }
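
/* Worked examples (editor's illustration): for
   (and:SI (reg:SI) (const_int 255)) nonzero_bits returns 0xff, since
   the AND case below intersects the operands' nonzero masks; and for
   (sign_extend:DI (reg:SI)) num_sign_bit_copies returns at least 33,
   the 32 newly created sign-bit copies plus the inner sign bit
   itself.  */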
3867
3868 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3869 It avoids exponential behavior in nonzero_bits1 when X has
3870 identical subexpressions on the first or the second level. */
3871
3872 static unsigned HOST_WIDE_INT
3873 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3874 enum machine_mode known_mode,
3875 unsigned HOST_WIDE_INT known_ret)
3876 {
3877 if (x == known_x && mode == known_mode)
3878 return known_ret;
3879
3880 /* Try to find identical subexpressions. If found call
3881 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3882 precomputed value for the subexpression as KNOWN_RET. */
3883
3884 if (ARITHMETIC_P (x))
3885 {
3886 rtx x0 = XEXP (x, 0);
3887 rtx x1 = XEXP (x, 1);
3888
3889 /* Check the first level. */
3890 if (x0 == x1)
3891 return nonzero_bits1 (x, mode, x0, mode,
3892 cached_nonzero_bits (x0, mode, known_x,
3893 known_mode, known_ret));
3894
3895 /* Check the second level. */
3896 if (ARITHMETIC_P (x0)
3897 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3898 return nonzero_bits1 (x, mode, x1, mode,
3899 cached_nonzero_bits (x1, mode, known_x,
3900 known_mode, known_ret));
3901
3902 if (ARITHMETIC_P (x1)
3903 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3904 return nonzero_bits1 (x, mode, x0, mode,
3905 cached_nonzero_bits (x0, mode, known_x,
3906 known_mode, known_ret));
3907 }
3908
3909 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3910 }
3911
3912 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3913 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3914 is less useful. We can't allow both, because that results in exponential
3915 run time recursion. There is a nullstone testcase that triggered
3916 this. This macro avoids accidental uses of num_sign_bit_copies. */
3917 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3918
3919 /* Given an expression, X, compute which bits in X can be nonzero.
3920 We don't care about bits outside of those defined in MODE.
3921
3922    For most X this is simply GET_MODE_MASK (MODE), but if X is
3923 an arithmetic operation, we can do better. */
3924
3925 static unsigned HOST_WIDE_INT
3926 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3927 enum machine_mode known_mode,
3928 unsigned HOST_WIDE_INT known_ret)
3929 {
3930 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3931 unsigned HOST_WIDE_INT inner_nz;
3932 enum rtx_code code;
3933 enum machine_mode inner_mode;
3934 unsigned int mode_width = GET_MODE_PRECISION (mode);
3935
3936 /* For floating-point and vector values, assume all bits are needed. */
3937 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3938 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3939 return nonzero;
3940
3941 /* If X is wider than MODE, use its mode instead. */
3942 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
3943 {
3944 mode = GET_MODE (x);
3945 nonzero = GET_MODE_MASK (mode);
3946 mode_width = GET_MODE_PRECISION (mode);
3947 }
3948
3949 if (mode_width > HOST_BITS_PER_WIDE_INT)
3950 /* Our only callers in this case look for single bit values. So
3951 just return the mode mask. Those tests will then be false. */
3952 return nonzero;
3953
3954 #ifndef WORD_REGISTER_OPERATIONS
3955 /* If MODE is wider than X, but both are a single word for both the host
3956 and target machines, we can compute this from which bits of the
3957 object might be nonzero in its own mode, taking into account the fact
3958 that on many CISC machines, accessing an object in a wider mode
3959 causes the high-order bits to become undefined. So they are
3960 not known to be zero. */
3961
3962 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3963 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
3964 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3965 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
3966 {
3967 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3968 known_x, known_mode, known_ret);
3969 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3970 return nonzero;
3971 }
3972 #endif
3973
3974 code = GET_CODE (x);
3975 switch (code)
3976 {
3977 case REG:
3978 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3979 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3980 all the bits above ptr_mode are known to be zero. */
3981 /* As we do not know which address space the pointer is referring to,
3982 we can do this only if the target does not support different pointer
3983 or address modes depending on the address space. */
3984 if (target_default_pointer_address_modes_p ()
3985 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3986 && REG_POINTER (x))
3987 nonzero &= GET_MODE_MASK (ptr_mode);
3988 #endif
3989
3990 /* Include declared information about alignment of pointers. */
3991 /* ??? We don't properly preserve REG_POINTER changes across
3992 pointer-to-integer casts, so we can't trust it except for
3993 things that we know must be pointers. See execute/960116-1.c. */
3994 if ((x == stack_pointer_rtx
3995 || x == frame_pointer_rtx
3996 || x == arg_pointer_rtx)
3997 && REGNO_POINTER_ALIGN (REGNO (x)))
3998 {
3999 unsigned HOST_WIDE_INT alignment
4000 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4001
4002 #ifdef PUSH_ROUNDING
4003 /* If PUSH_ROUNDING is defined, it is possible for the
4004 stack to be momentarily aligned only to that amount,
4005 so we pick the least alignment. */
4006 if (x == stack_pointer_rtx && PUSH_ARGS)
4007 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4008 alignment);
4009 #endif
4010
4011 nonzero &= ~(alignment - 1);
4012 }
4013
4014 {
4015 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4016 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4017 known_mode, known_ret,
4018 &nonzero_for_hook);
4019
4020 if (new_rtx)
4021 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4022 known_mode, known_ret);
4023
4024 return nonzero_for_hook;
4025 }
4026
4027 case CONST_INT:
4028 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4029 /* If X is negative in MODE, sign-extend the value. */
4030 if (INTVAL (x) > 0
4031 && mode_width < BITS_PER_WORD
4032 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4033 != 0)
4034 return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
4035 #endif
4036
4037 return UINTVAL (x);
4038
4039 case MEM:
4040 #ifdef LOAD_EXTEND_OP
4041 /* On many, if not most, RISC machines, reading a byte from memory
4042 zeros the rest of the register. Noticing that fact saves a lot
4043 of extra zero-extends. */
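/* Illustrative sketch (assuming a target where LOAD_EXTEND_OP (QImode)
   is ZERO_EXTEND): a QImode MEM then contributes at most the low eight
   bits, so NONZERO is masked with GET_MODE_MASK (QImode) == 0xff. */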
4044 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4045 nonzero &= GET_MODE_MASK (GET_MODE (x));
4046 #endif
4047 break;
4048
4049 case EQ: case NE:
4050 case UNEQ: case LTGT:
4051 case GT: case GTU: case UNGT:
4052 case LT: case LTU: case UNLT:
4053 case GE: case GEU: case UNGE:
4054 case LE: case LEU: case UNLE:
4055 case UNORDERED: case ORDERED:
4056 /* If this produces an integer result, we know which bits are set.
4057 Code here used to clear bits outside the mode of X, but that is
4058 now done above. */
4059 /* Mind that MODE is the mode the caller wants to look at this
4060 operation in, and not the actual operation mode. We can wind
4061 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4062 that describes the results of a vector compare. */
4063 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4064 && mode_width <= HOST_BITS_PER_WIDE_INT)
4065 nonzero = STORE_FLAG_VALUE;
4066 break;
4067
4068 case NEG:
4069 #if 0
4070 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4071 and num_sign_bit_copies. */
4072 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4073 == GET_MODE_PRECISION (GET_MODE (x)))
4074 nonzero = 1;
4075 #endif
4076
4077 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4078 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4079 break;
4080
4081 case ABS:
4082 #if 0
4083 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4084 and num_sign_bit_copies. */
4085 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4086 == GET_MODE_PRECISION (GET_MODE (x)))
4087 nonzero = 1;
4088 #endif
4089 break;
4090
4091 case TRUNCATE:
4092 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4093 known_x, known_mode, known_ret)
4094 & GET_MODE_MASK (mode));
4095 break;
4096
4097 case ZERO_EXTEND:
4098 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4099 known_x, known_mode, known_ret);
4100 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4101 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4102 break;
4103
4104 case SIGN_EXTEND:
4105 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4106 Otherwise, show that all the bits in the outer mode but not in the
4107 inner mode may be nonzero. */
4108 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4109 known_x, known_mode, known_ret);
4110 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4111 {
4112 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4113 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4114 inner_nz |= (GET_MODE_MASK (mode)
4115 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4116 }
4117
4118 nonzero &= inner_nz;
4119 break;
4120
4121 case AND:
4122 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4123 known_x, known_mode, known_ret)
4124 & cached_nonzero_bits (XEXP (x, 1), mode,
4125 known_x, known_mode, known_ret);
4126 break;
4127
4128 case XOR: case IOR:
4129 case UMIN: case UMAX: case SMIN: case SMAX:
4130 {
4131 unsigned HOST_WIDE_INT nonzero0
4132 = cached_nonzero_bits (XEXP (x, 0), mode,
4133 known_x, known_mode, known_ret);
4134
4135 /* Don't call nonzero_bits for the second time if it cannot change
4136 anything. */
4137 if ((nonzero & nonzero0) != nonzero)
4138 nonzero &= nonzero0
4139 | cached_nonzero_bits (XEXP (x, 1), mode,
4140 known_x, known_mode, known_ret);
4141 }
4142 break;
4143
4144 case PLUS: case MINUS:
4145 case MULT:
4146 case DIV: case UDIV:
4147 case MOD: case UMOD:
4148 /* We can apply the rules of arithmetic to compute the number of
4149 high- and low-order zero bits of these operations. We start by
4150 computing the width (position of the highest-order nonzero bit)
4151 and the number of low-order zero bits for each value. */
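/* A worked example of the computation below (illustrative values):
   if nz0 == 0x70 (width0 == 7, low0 == 4) and nz1 == 0x18
   (width1 == 5, low1 == 3), then for PLUS we get
   result_width == MAX (7, 5) + 1 == 8 and result_low == MIN (4, 3) == 3,
   so the sum is known to fit in the mask 0xf8. */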
4152 {
4153 unsigned HOST_WIDE_INT nz0
4154 = cached_nonzero_bits (XEXP (x, 0), mode,
4155 known_x, known_mode, known_ret);
4156 unsigned HOST_WIDE_INT nz1
4157 = cached_nonzero_bits (XEXP (x, 1), mode,
4158 known_x, known_mode, known_ret);
4159 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4160 int width0 = floor_log2 (nz0) + 1;
4161 int width1 = floor_log2 (nz1) + 1;
4162 int low0 = floor_log2 (nz0 & -nz0);
4163 int low1 = floor_log2 (nz1 & -nz1);
4164 unsigned HOST_WIDE_INT op0_maybe_minusp
4165 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4166 unsigned HOST_WIDE_INT op1_maybe_minusp
4167 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4168 unsigned int result_width = mode_width;
4169 int result_low = 0;
4170
4171 switch (code)
4172 {
4173 case PLUS:
4174 result_width = MAX (width0, width1) + 1;
4175 result_low = MIN (low0, low1);
4176 break;
4177 case MINUS:
4178 result_low = MIN (low0, low1);
4179 break;
4180 case MULT:
4181 result_width = width0 + width1;
4182 result_low = low0 + low1;
4183 break;
4184 case DIV:
4185 if (width1 == 0)
4186 break;
4187 if (!op0_maybe_minusp && !op1_maybe_minusp)
4188 result_width = width0;
4189 break;
4190 case UDIV:
4191 if (width1 == 0)
4192 break;
4193 result_width = width0;
4194 break;
4195 case MOD:
4196 if (width1 == 0)
4197 break;
4198 if (!op0_maybe_minusp && !op1_maybe_minusp)
4199 result_width = MIN (width0, width1);
4200 result_low = MIN (low0, low1);
4201 break;
4202 case UMOD:
4203 if (width1 == 0)
4204 break;
4205 result_width = MIN (width0, width1);
4206 result_low = MIN (low0, low1);
4207 break;
4208 default:
4209 gcc_unreachable ();
4210 }
4211
4212 if (result_width < mode_width)
4213 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4214
4215 if (result_low > 0)
4216 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4217 }
4218 break;
4219
4220 case ZERO_EXTRACT:
4221 if (CONST_INT_P (XEXP (x, 1))
4222 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4223 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4224 break;
4225
4226 case SUBREG:
4227 /* If this is a SUBREG formed for a promoted variable that has
4228 been zero-extended, we know that at least the high-order bits
4229 are zero, though others might be too. */
4230
4231 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4232 nonzero = GET_MODE_MASK (GET_MODE (x))
4233 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4234 known_x, known_mode, known_ret);
4235
4236 inner_mode = GET_MODE (SUBREG_REG (x));
4237 /* If the inner mode is a single word for both the host and target
4238 machines, we can compute this from which bits of the inner
4239 object might be nonzero. */
4240 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4241 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4242 {
4243 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4244 known_x, known_mode, known_ret);
4245
4246 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4247 /* If this is a typical RISC machine, we only have to worry
4248 about the way loads are extended. */
4249 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4250 ? val_signbit_known_set_p (inner_mode, nonzero)
4251 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4252 || !MEM_P (SUBREG_REG (x)))
4253 #endif
4254 {
4255 /* On many CISC machines, accessing an object in a wider mode
4256 causes the high-order bits to become undefined. So they are
4257 not known to be zero. */
4258 if (GET_MODE_PRECISION (GET_MODE (x))
4259 > GET_MODE_PRECISION (inner_mode))
4260 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4261 & ~GET_MODE_MASK (inner_mode));
4262 }
4263 }
4264 break;
4265
4266 case ASHIFTRT:
4267 case LSHIFTRT:
4268 case ASHIFT:
4269 case ROTATE:
4270 /* The nonzero bits are in two classes: any bits within MODE
4271 that aren't in GET_MODE (x) are always significant. The rest of the
4272 nonzero bits are those that are significant in the operand of
4273 the shift when shifted the appropriate number of bits. This
4274 shows that high-order bits are cleared by the right shift and
4275 low-order bits by left shifts. */
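/* Illustrative sketch, assuming 32-bit SImode: for
   (lshiftrt:SI x (const_int 8)) with all operand bits possibly nonzero,
   INNER below becomes 0x00ffffff; for (ashift:SI x (const_int 8)) it
   would instead become 0xffffff00. */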
4276 if (CONST_INT_P (XEXP (x, 1))
4277 && INTVAL (XEXP (x, 1)) >= 0
4278 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4279 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4280 {
4281 enum machine_mode inner_mode = GET_MODE (x);
4282 unsigned int width = GET_MODE_PRECISION (inner_mode);
4283 int count = INTVAL (XEXP (x, 1));
4284 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4285 unsigned HOST_WIDE_INT op_nonzero
4286 = cached_nonzero_bits (XEXP (x, 0), mode,
4287 known_x, known_mode, known_ret);
4288 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4289 unsigned HOST_WIDE_INT outer = 0;
4290
4291 if (mode_width > width)
4292 outer = (op_nonzero & nonzero & ~mode_mask);
4293
4294 if (code == LSHIFTRT)
4295 inner >>= count;
4296 else if (code == ASHIFTRT)
4297 {
4298 inner >>= count;
4299
4300 /* If the sign bit may have been nonzero before the shift, we
4301 need to mark all the places it could have been copied to
4302 by the shift as possibly nonzero. */
4303 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4304 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4305 << (width - count);
4306 }
4307 else if (code == ASHIFT)
4308 inner <<= count;
4309 else
4310 inner = ((inner << (count % width)
4311 | (inner >> (width - (count % width)))) & mode_mask);
4312
4313 nonzero &= (outer | inner);
4314 }
4315 break;
4316
4317 case FFS:
4318 case POPCOUNT:
4319 /* This is at most the number of bits in the mode. */
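/* E.g. (illustrative): for mode_width == 32, floor_log2 (32) == 5 and
   nonzero == (2 << 5) - 1 == 63, which covers every result in the
   range [0, 32]. */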
4320 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4321 break;
4322
4323 case CLZ:
4324 /* If CLZ has a known value at zero, then the nonzero bits are
4325 that value, plus the number of bits in the mode minus one. */
4326 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4327 nonzero
4328 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4329 else
4330 nonzero = -1;
4331 break;
4332
4333 case CTZ:
4334 /* If CTZ has a known value at zero, then the nonzero bits are
4335 that value, plus the number of bits in the mode minus one. */
4336 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4337 nonzero
4338 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4339 else
4340 nonzero = -1;
4341 break;
4342
4343 case CLRSB:
4344 /* This is at most the number of bits in the mode minus 1. */
4345 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4346 break;
4347
4348 case PARITY:
4349 nonzero = 1;
4350 break;
4351
4352 case IF_THEN_ELSE:
4353 {
4354 unsigned HOST_WIDE_INT nonzero_true
4355 = cached_nonzero_bits (XEXP (x, 1), mode,
4356 known_x, known_mode, known_ret);
4357
4358 /* Don't call nonzero_bits for the second time if it cannot change
4359 anything. */
4360 if ((nonzero & nonzero_true) != nonzero)
4361 nonzero &= nonzero_true
4362 | cached_nonzero_bits (XEXP (x, 2), mode,
4363 known_x, known_mode, known_ret);
4364 }
4365 break;
4366
4367 default:
4368 break;
4369 }
4370
4371 return nonzero;
4372 }
4373
4374 /* See the macro definition above. */
4375 #undef cached_num_sign_bit_copies
4376
4377 \f
4378 /* The function cached_num_sign_bit_copies is a wrapper around
4379 num_sign_bit_copies1. It avoids exponential behavior in
4380 num_sign_bit_copies1 when X has identical subexpressions on the
4381 first or the second level. */
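/* Illustrative example: in (plus:SI (reg:SI 1) (reg:SI 1)), where both
   operands are the identical rtx object, the wrapper computes the value
   for (reg:SI 1) once and passes it down as KNOWN_X/KNOWN_RET instead
   of recomputing it. */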
4382
4383 static unsigned int
4384 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4385 enum machine_mode known_mode,
4386 unsigned int known_ret)
4387 {
4388 if (x == known_x && mode == known_mode)
4389 return known_ret;
4390
4391 /* Try to find identical subexpressions. If found call
4392 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4393 the precomputed value for the subexpression as KNOWN_RET. */
4394
4395 if (ARITHMETIC_P (x))
4396 {
4397 rtx x0 = XEXP (x, 0);
4398 rtx x1 = XEXP (x, 1);
4399
4400 /* Check the first level. */
4401 if (x0 == x1)
4402 return
4403 num_sign_bit_copies1 (x, mode, x0, mode,
4404 cached_num_sign_bit_copies (x0, mode, known_x,
4405 known_mode,
4406 known_ret));
4407
4408 /* Check the second level. */
4409 if (ARITHMETIC_P (x0)
4410 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4411 return
4412 num_sign_bit_copies1 (x, mode, x1, mode,
4413 cached_num_sign_bit_copies (x1, mode, known_x,
4414 known_mode,
4415 known_ret));
4416
4417 if (ARITHMETIC_P (x1)
4418 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4419 return
4420 num_sign_bit_copies1 (x, mode, x0, mode,
4421 cached_num_sign_bit_copies (x0, mode, known_x,
4422 known_mode,
4423 known_ret));
4424 }
4425
4426 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4427 }
4428
4429 /* Return the number of bits at the high-order end of X that are known to
4430 be equal to the sign bit. X will be used in mode MODE; if MODE is
4431 VOIDmode, X will be used in its own mode. The returned value will always
4432 be between 1 and the number of bits in MODE. */
4433
4434 static unsigned int
4435 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4436 enum machine_mode known_mode,
4437 unsigned int known_ret)
4438 {
4439 enum rtx_code code = GET_CODE (x);
4440 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4441 int num0, num1, result;
4442 unsigned HOST_WIDE_INT nonzero;
4443
4444 /* If we weren't given a mode, use the mode of X. If the mode is still
4445 VOIDmode, we don't know anything. Likewise if one of the modes is
4446 floating-point. */
4447
4448 if (mode == VOIDmode)
4449 mode = GET_MODE (x);
4450
4451 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4452 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4453 return 1;
4454
4455 /* For a smaller object, just ignore the high bits. */
4456 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4457 {
4458 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4459 known_x, known_mode, known_ret);
4460 return MAX (1,
4461 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4462 }
4463
4464 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4465 {
4466 #ifndef WORD_REGISTER_OPERATIONS
4467 /* If this machine does not do all register operations on the entire
4468 register and MODE is wider than the mode of X, we can say nothing
4469 at all about the high-order bits. */
4470 return 1;
4471 #else
4472 /* Likewise on machines that do, if the mode of the object is smaller
4473 than a word and loads of that size don't sign extend, we can say
4474 nothing about the high order bits. */
4475 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4476 #ifdef LOAD_EXTEND_OP
4477 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4478 #endif
4479 )
4480 return 1;
4481 #endif
4482 }
4483
4484 switch (code)
4485 {
4486 case REG:
4487
4488 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4489 /* If pointers extend signed and this is a pointer in Pmode, say that
4490 all the bits above ptr_mode are known to be sign bit copies. */
4491 /* As we do not know which address space the pointer is referring to,
4492 we can do this only if the target does not support different pointer
4493 or address modes depending on the address space. */
4494 if (target_default_pointer_address_modes_p ()
4495 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4496 && mode == Pmode && REG_POINTER (x))
4497 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4498 #endif
4499
4500 {
4501 unsigned int copies_for_hook = 1, copies = 1;
4502 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4503 known_mode, known_ret,
4504 &copies_for_hook);
4505
4506 if (new_rtx)
4507 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4508 known_mode, known_ret);
4509
4510 if (copies > 1 || copies_for_hook > 1)
4511 return MAX (copies, copies_for_hook);
4512
4513 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4514 }
4515 break;
4516
4517 case MEM:
4518 #ifdef LOAD_EXTEND_OP
4519 /* Some RISC machines sign-extend all loads of smaller than a word. */
4520 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4521 return MAX (1, ((int) bitwidth
4522 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4523 #endif
4524 break;
4525
4526 case CONST_INT:
4527 /* If the constant is negative, take its 1's complement and remask.
4528 Then see how many zero bits we have. */
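/* Worked example (illustrative, assuming 32-bit SImode): x == -4 is
   masked to 0xfffffffc, whose sign bit is set, so NONZERO becomes 0x3;
   the result is 32 - floor_log2 (3) - 1 == 30 sign bit copies. */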
4529 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4530 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4531 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4532 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4533
4534 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4535
4536 case SUBREG:
4537 /* If this is a SUBREG for a promoted object that is sign-extended
4538 and we are looking at it in a wider mode, we know that at least the
4539 high-order bits are known to be sign bit copies. */
4540
4541 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4542 {
4543 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4544 known_x, known_mode, known_ret);
4545 return MAX ((int) bitwidth
4546 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4547 num0);
4548 }
4549
4550 /* For a smaller object, just ignore the high bits. */
4551 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4552 {
4553 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4554 known_x, known_mode, known_ret);
4555 return MAX (1, (num0
4556 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4557 - bitwidth)));
4558 }
4559
4560 #ifdef WORD_REGISTER_OPERATIONS
4561 #ifdef LOAD_EXTEND_OP
4562 /* For paradoxical SUBREGs on machines where all register operations
4563 affect the entire register, just look inside. Note that we are
4564 passing MODE to the recursive call, so the number of sign bit copies
4565 will remain relative to that mode, not the inner mode. */
4566
4567 /* This works only if loads sign extend. Otherwise, if we get a
4568 reload for the inner part, it may be loaded from the stack, and
4569 then we lose all sign bit copies that existed before the store
4570 to the stack. */
4571
4572 if (paradoxical_subreg_p (x)
4573 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4574 && MEM_P (SUBREG_REG (x)))
4575 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4576 known_x, known_mode, known_ret);
4577 #endif
4578 #endif
4579 break;
4580
4581 case SIGN_EXTRACT:
4582 if (CONST_INT_P (XEXP (x, 1)))
4583 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4584 break;
4585
4586 case SIGN_EXTEND:
4587 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4588 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4589 known_x, known_mode, known_ret));
4590
4591 case TRUNCATE:
4592 /* For a smaller object, just ignore the high bits. */
4593 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4594 known_x, known_mode, known_ret);
4595 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4596 - bitwidth)));
4597
4598 case NOT:
4599 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4600 known_x, known_mode, known_ret);
4601
4602 case ROTATE: case ROTATERT:
4603 /* If we are rotating left by a number of bits less than the number
4604 of sign bit copies, we can just subtract that amount from the
4605 number. */
4606 if (CONST_INT_P (XEXP (x, 1))
4607 && INTVAL (XEXP (x, 1)) >= 0
4608 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4609 {
4610 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4611 known_x, known_mode, known_ret);
4612 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4613 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4614 }
4615 break;
4616
4617 case NEG:
4618 /* In general, this subtracts one sign bit copy. But if the value
4619 is known to be positive, the number of sign bit copies is the
4620 same as that of the input. Finally, if the input has just one bit
4621 that might be nonzero, all the bits are copies of the sign bit. */
4622 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4623 known_x, known_mode, known_ret);
4624 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4625 return num0 > 1 ? num0 - 1 : 1;
4626
4627 nonzero = nonzero_bits (XEXP (x, 0), mode);
4628 if (nonzero == 1)
4629 return bitwidth;
4630
4631 if (num0 > 1
4632 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4633 num0--;
4634
4635 return num0;
4636
4637 case IOR: case AND: case XOR:
4638 case SMIN: case SMAX: case UMIN: case UMAX:
4639 /* Logical operations will preserve the number of sign-bit copies.
4640 MIN and MAX operations always return one of the operands. */
4641 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4642 known_x, known_mode, known_ret);
4643 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4644 known_x, known_mode, known_ret);
4645
4646 /* If num1 is clearing some of the top bits then regardless of
4647 the other term, we are guaranteed to have at least that many
4648 high-order zero bits. */
4649 if (code == AND
4650 && num1 > 1
4651 && bitwidth <= HOST_BITS_PER_WIDE_INT
4652 && CONST_INT_P (XEXP (x, 1))
4653 && (UINTVAL (XEXP (x, 1))
4654 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4655 return num1;
4656
4657 /* Similarly for IOR when setting high-order bits. */
4658 if (code == IOR
4659 && num1 > 1
4660 && bitwidth <= HOST_BITS_PER_WIDE_INT
4661 && CONST_INT_P (XEXP (x, 1))
4662 && (UINTVAL (XEXP (x, 1))
4663 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4664 return num1;
4665
4666 return MIN (num0, num1);
4667
4668 case PLUS: case MINUS:
4669 /* For addition and subtraction, we can have a 1-bit carry. However,
4670 if we are subtracting 1 from a positive number, there will not
4671 be such a carry. Furthermore, if the positive number is known to
4672 be 0 or 1, we know the result is either -1 or 0. */
4673
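/* Illustrative: for (plus:SI y (const_int -1)) with
   nonzero_bits (y) == 1, Y is 0 or 1, so the sum is -1 or 0 and all
   BITWIDTH bits are copies of the sign bit. */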
4674 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4675 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4676 {
4677 nonzero = nonzero_bits (XEXP (x, 0), mode);
4678 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4679 return (nonzero == 1 || nonzero == 0 ? bitwidth
4680 : bitwidth - floor_log2 (nonzero) - 1);
4681 }
4682
4683 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4684 known_x, known_mode, known_ret);
4685 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4686 known_x, known_mode, known_ret);
4687 result = MAX (1, MIN (num0, num1) - 1);
4688
4689 return result;
4690
4691 case MULT:
4692 /* The number of bits of the product is the sum of the number of
4693 bits of both terms. However, unless one of the terms is known
4694 to be positive, we must allow for an additional bit since negating
4695 a negative number can remove one sign bit copy. */
4696
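/* Illustrative numbers: with bitwidth == 32, num0 == 20 and num1 == 20,
   RESULT below is 32 - 12 - 12 == 8; if both operands might be
   negative, one more copy is subtracted, leaving 7. */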
4697 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4698 known_x, known_mode, known_ret);
4699 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4700 known_x, known_mode, known_ret);
4701
4702 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4703 if (result > 0
4704 && (bitwidth > HOST_BITS_PER_WIDE_INT
4705 || (((nonzero_bits (XEXP (x, 0), mode)
4706 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4707 && ((nonzero_bits (XEXP (x, 1), mode)
4708 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4709 != 0))))
4710 result--;
4711
4712 return MAX (1, result);
4713
4714 case UDIV:
4715 /* The result must be <= the first operand. If the first operand
4716 has the high bit set, we know nothing about the number of sign
4717 bit copies. */
4718 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4719 return 1;
4720 else if ((nonzero_bits (XEXP (x, 0), mode)
4721 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4722 return 1;
4723 else
4724 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4725 known_x, known_mode, known_ret);
4726
4727 case UMOD:
4728 /* The result must be <= the second operand. If the second operand
4729 has (or just might have) the high bit set, we know nothing about
4730 the number of sign bit copies. */
4731 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4732 return 1;
4733 else if ((nonzero_bits (XEXP (x, 1), mode)
4734 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4735 return 1;
4736 else
4737 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4738 known_x, known_mode, known_ret);
4739
4740 case DIV:
4741 /* Similar to unsigned division, except that we have to worry about
4742 the case where the divisor is negative, in which case we have
4743 to add 1. */
4744 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4745 known_x, known_mode, known_ret);
4746 if (result > 1
4747 && (bitwidth > HOST_BITS_PER_WIDE_INT
4748 || (nonzero_bits (XEXP (x, 1), mode)
4749 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4750 result--;
4751
4752 return result;
4753
4754 case MOD:
4755 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4756 known_x, known_mode, known_ret);
4757 if (result > 1
4758 && (bitwidth > HOST_BITS_PER_WIDE_INT
4759 || (nonzero_bits (XEXP (x, 1), mode)
4760 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4761 result--;
4762
4763 return result;
4764
4765 case ASHIFTRT:
4766 /* Shifts by a constant add to the number of bits equal to the
4767 sign bit. */
4768 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4769 known_x, known_mode, known_ret);
4770 if (CONST_INT_P (XEXP (x, 1))
4771 && INTVAL (XEXP (x, 1)) > 0
4772 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4773 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4774
4775 return num0;
4776
4777 case ASHIFT:
4778 /* Left shifts destroy copies. */
4779 if (!CONST_INT_P (XEXP (x, 1))
4780 || INTVAL (XEXP (x, 1)) < 0
4781 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4782 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4783 return 1;
4784
4785 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4786 known_x, known_mode, known_ret);
4787 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4788
4789 case IF_THEN_ELSE:
4790 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4791 known_x, known_mode, known_ret);
4792 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4793 known_x, known_mode, known_ret);
4794 return MIN (num0, num1);
4795
4796 case EQ: case NE: case GE: case GT: case LE: case LT:
4797 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4798 case GEU: case GTU: case LEU: case LTU:
4799 case UNORDERED: case ORDERED:
4800 /* If the constant is negative, take its 1's complement and remask.
4801 Then see how many zero bits we have. */
4802 nonzero = STORE_FLAG_VALUE;
4803 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4804 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4805 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4806
4807 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4808
4809 default:
4810 break;
4811 }
4812
4813 /* If we haven't been able to figure it out by one of the above rules,
4814 see if some of the high-order bits are known to be zero. If so,
4815 count those bits and return one less than that amount. If we can't
4816 safely compute the mask for this mode, always return BITWIDTH. */
4817
4818 bitwidth = GET_MODE_PRECISION (mode);
4819 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4820 return 1;
4821
4822 nonzero = nonzero_bits (x, mode);
4823 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4824 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4825 }
4826
4827 /* Calculate the rtx_cost of a single instruction. A return value of
4828 zero indicates an instruction pattern without a known cost. */
4829
4830 int
4831 insn_rtx_cost (rtx pat, bool speed)
4832 {
4833 int i, cost;
4834 rtx set;
4835
4836 /* Extract the single set rtx from the instruction pattern.
4837 We can't use single_set since we only have the pattern. */
4838 if (GET_CODE (pat) == SET)
4839 set = pat;
4840 else if (GET_CODE (pat) == PARALLEL)
4841 {
4842 set = NULL_RTX;
4843 for (i = 0; i < XVECLEN (pat, 0); i++)
4844 {
4845 rtx x = XVECEXP (pat, 0, i);
4846 if (GET_CODE (x) == SET)
4847 {
4848 if (set)
4849 return 0;
4850 set = x;
4851 }
4852 }
4853 if (!set)
4854 return 0;
4855 }
4856 else
4857 return 0;
4858
4859 cost = set_src_cost (SET_SRC (set), speed);
4860 return cost > 0 ? cost : COSTS_N_INSNS (1);
4861 }
4862
4863 /* Given an insn INSN and condition COND, return the condition in a
4864 canonical form to simplify testing by callers. Specifically:
4865
4866 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4867 (2) Both operands will be machine operands; (cc0) will have been replaced.
4868 (3) If an operand is a constant, it will be the second operand.
4869 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4870 for GE, GEU, and LEU.
4871
4872 If the condition cannot be understood, or is an inequality floating-point
4873 comparison which needs to be reversed, 0 will be returned.
4874
4875 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4876
4877 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4878 insn used in locating the condition was found. If a replacement test
4879 of the condition is desired, it should be placed in front of that
4880 insn and we will be sure that the inputs are still valid.
4881
4882 If WANT_REG is nonzero, we wish the condition to be relative to that
4883 register, if possible. Therefore, do not canonicalize the condition
4884 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4885 to be a compare to a CC mode register.
4886
4887 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4888 and at INSN. */
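/* Illustrative use of rule (4): a condition (le:SI (reg:SI 1) (const_int 4))
   is returned as (lt:SI (reg:SI 1) (const_int 5)). */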
4889
4890 rtx
4891 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4892 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4893 {
4894 enum rtx_code code;
4895 rtx prev = insn;
4896 const_rtx set;
4897 rtx tem;
4898 rtx op0, op1;
4899 int reverse_code = 0;
4900 enum machine_mode mode;
4901 basic_block bb = BLOCK_FOR_INSN (insn);
4902
4903 code = GET_CODE (cond);
4904 mode = GET_MODE (cond);
4905 op0 = XEXP (cond, 0);
4906 op1 = XEXP (cond, 1);
4907
4908 if (reverse)
4909 code = reversed_comparison_code (cond, insn);
4910 if (code == UNKNOWN)
4911 return 0;
4912
4913 if (earliest)
4914 *earliest = insn;
4915
4916 /* If we are comparing a register with zero, see if the register is set
4917 in the previous insn to a COMPARE or a comparison operation. Perform
4918 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4919 in cse.c */
4920
4921 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4922 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4923 && op1 == CONST0_RTX (GET_MODE (op0))
4924 && op0 != want_reg)
4925 {
4926 /* Set nonzero when we find something of interest. */
4927 rtx x = 0;
4928
4929 #ifdef HAVE_cc0
4930 /* If this is a comparison with cc0, import the actual comparison
4931 from the compare insn. */
4932 if (op0 == cc0_rtx)
4933 {
4934 if ((prev = prev_nonnote_insn (prev)) == 0
4935 || !NONJUMP_INSN_P (prev)
4936 || (set = single_set (prev)) == 0
4937 || SET_DEST (set) != cc0_rtx)
4938 return 0;
4939
4940 op0 = SET_SRC (set);
4941 op1 = CONST0_RTX (GET_MODE (op0));
4942 if (earliest)
4943 *earliest = prev;
4944 }
4945 #endif
4946
4947 /* If this is a COMPARE, pick up the two things being compared. */
4948 if (GET_CODE (op0) == COMPARE)
4949 {
4950 op1 = XEXP (op0, 1);
4951 op0 = XEXP (op0, 0);
4952 continue;
4953 }
4954 else if (!REG_P (op0))
4955 break;
4956
4957 /* Go back to the previous insn. Stop if it is not an INSN. We also
4958 stop if it isn't a single set or if it has a REG_INC note because
4959 we don't want to bother dealing with it. */
4960
4961 prev = prev_nonnote_nondebug_insn (prev);
4962
4963 if (prev == 0
4964 || !NONJUMP_INSN_P (prev)
4965 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4966 /* In cfglayout mode, there do not have to be labels at the
4967 beginning of a block, or jumps at the end, so the previous
4968 conditions would not stop us when we reach bb boundary. */
4969 || BLOCK_FOR_INSN (prev) != bb)
4970 break;
4971
4972 set = set_of (op0, prev);
4973
4974 if (set
4975 && (GET_CODE (set) != SET
4976 || !rtx_equal_p (SET_DEST (set), op0)))
4977 break;
4978
4979 /* If this is setting OP0, get what it sets it to if it looks
4980 relevant. */
4981 if (set)
4982 {
4983 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4984 #ifdef FLOAT_STORE_FLAG_VALUE
4985 REAL_VALUE_TYPE fsfv;
4986 #endif
4987
4988 /* ??? We may not combine comparisons done in a CCmode with
4989 comparisons not done in a CCmode. This is to aid targets
4990 like Alpha that have an IEEE compliant EQ instruction, and
4991 a non-IEEE compliant BEQ instruction. The use of CCmode is
4992 actually artificial, simply to prevent the combination, but
4993 should not affect other platforms.
4994
4995 However, we must allow VOIDmode comparisons to match either
4996 CCmode or non-CCmode comparison, because some ports have
4997 modeless comparisons inside branch patterns.
4998
4999 ??? This mode check should perhaps look more like the mode check
5000 in simplify_comparison in combine. */
5001
5002 if ((GET_CODE (SET_SRC (set)) == COMPARE
5003 || (((code == NE
5004 || (code == LT
5005 && val_signbit_known_set_p (inner_mode,
5006 STORE_FLAG_VALUE))
5007 #ifdef FLOAT_STORE_FLAG_VALUE
5008 || (code == LT
5009 && SCALAR_FLOAT_MODE_P (inner_mode)
5010 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5011 REAL_VALUE_NEGATIVE (fsfv)))
5012 #endif
5013 ))
5014 && COMPARISON_P (SET_SRC (set))))
5015 && (((GET_MODE_CLASS (mode) == MODE_CC)
5016 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5017 || mode == VOIDmode || inner_mode == VOIDmode))
5018 x = SET_SRC (set);
5019 else if (((code == EQ
5020 || (code == GE
5021 && val_signbit_known_set_p (inner_mode,
5022 STORE_FLAG_VALUE))
5023 #ifdef FLOAT_STORE_FLAG_VALUE
5024 || (code == GE
5025 && SCALAR_FLOAT_MODE_P (inner_mode)
5026 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5027 REAL_VALUE_NEGATIVE (fsfv)))
5028 #endif
5029 ))
5030 && COMPARISON_P (SET_SRC (set))
5031 && (((GET_MODE_CLASS (mode) == MODE_CC)
5032 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5033 || mode == VOIDmode || inner_mode == VOIDmode))
5034
5035 {
5036 reverse_code = 1;
5037 x = SET_SRC (set);
5038 }
5039 else
5040 break;
5041 }
5042
5043 else if (reg_set_p (op0, prev))
5044 /* If this sets OP0, but not directly, we have to give up. */
5045 break;
5046
5047 if (x)
5048 {
5049 /* If the caller is expecting the condition to be valid at INSN,
5050 make sure X doesn't change before INSN. */
5051 if (valid_at_insn_p)
5052 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5053 break;
5054 if (COMPARISON_P (x))
5055 code = GET_CODE (x);
5056 if (reverse_code)
5057 {
5058 code = reversed_comparison_code (x, prev);
5059 if (code == UNKNOWN)
5060 return 0;
5061 reverse_code = 0;
5062 }
5063
5064 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5065 if (earliest)
5066 *earliest = prev;
5067 }
5068 }
5069
5070 /* If constant is first, put it last. */
5071 if (CONSTANT_P (op0))
5072 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5073
5074 /* If OP0 is the result of a comparison, we weren't able to find what
5075 was really being compared, so fail. */
5076 if (!allow_cc_mode
5077 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5078 return 0;
5079
5080 /* Canonicalize any ordered comparison with integers involving equality
5081 if we can do computations in the relevant mode and we do not
5082 overflow. */
5083
5084 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5085 && CONST_INT_P (op1)
5086 && GET_MODE (op0) != VOIDmode
5087 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5088 {
5089 HOST_WIDE_INT const_val = INTVAL (op1);
5090 unsigned HOST_WIDE_INT uconst_val = const_val;
5091 unsigned HOST_WIDE_INT max_val
5092 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5093
5094 switch (code)
5095 {
5096 case LE:
5097 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5098 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5099 break;
5100
5101 /* When cross-compiling, const_val might be sign-extended from
5102 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5103 case GE:
5104 if ((const_val & max_val)
5105 != ((unsigned HOST_WIDE_INT) 1
5106 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5107 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5108 break;
5109
5110 case LEU:
5111 if (uconst_val < max_val)
5112 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5113 break;
5114
5115 case GEU:
5116 if (uconst_val != 0)
5117 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5118 break;
5119
5120 default:
5121 break;
5122 }
5123 }
5124
5125 /* Never return CC0; return zero instead. */
5126 if (CC0_P (op0))
5127 return 0;
5128
5129 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5130 }
5131
5132 /* Given a jump insn JUMP, return the condition that will cause it to branch
5133 to its JUMP_LABEL. If the condition cannot be understood, or is an
5134 inequality floating-point comparison which needs to be reversed, 0 will
5135 be returned.
5136
5137 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5138 insn used in locating the condition was found. If a replacement test
5139 of the condition is desired, it should be placed in front of that
5140 insn and we will be sure that the inputs are still valid. If EARLIEST
5141 is null, the returned condition will be valid at INSN.
5142
5143 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5144 compare CC mode register.
5145
5146 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5147
5148 rtx
5149 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5150 {
5151 rtx cond;
5152 int reverse;
5153 rtx set;
5154
5155 /* If this is not a standard conditional jump, we can't parse it. */
5156 if (!JUMP_P (jump)
5157 || ! any_condjump_p (jump))
5158 return 0;
5159 set = pc_set (jump);
5160
5161 cond = XEXP (SET_SRC (set), 0);
5162
5163 /* If this branches to JUMP_LABEL when the condition is false, reverse
5164 the condition. */
5165 reverse
5166 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5167 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5168
5169 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5170 allow_cc_mode, valid_at_insn_p);
5171 }
5172
5173 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5174 TARGET_MODE_REP_EXTENDED.
5175
5176 Note that we assume that the property of
5177 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5178 narrower than mode B. I.e., if A is a mode narrower than B then in
5179 order to be able to operate on it in mode B, mode A needs to
5180 satisfy the requirements set by the representation of mode B. */
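/* Illustrative example (assuming a target where
   TARGET_MODE_REP_EXTENDED (QImode, HImode) is SIGN_EXTEND):
   NUM_SIGN_BIT_COPIES_IN_REP[HImode][QImode] is then 8, since a QImode
   value held in an HImode register must have its top eight bits equal
   to the QImode sign bit. */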
5181
5182 static void
5183 init_num_sign_bit_copies_in_rep (void)
5184 {
5185 enum machine_mode mode, in_mode;
5186
5187 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5188 in_mode = GET_MODE_WIDER_MODE (mode))
5189 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5190 mode = GET_MODE_WIDER_MODE (mode))
5191 {
5192 enum machine_mode i;
5193
5194 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5195 extends to the next widest mode. */
5196 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5197 || GET_MODE_WIDER_MODE (mode) == in_mode);
5198
5199 /* We are in in_mode. Count how many bits outside of mode
5200 have to be copies of the sign-bit. */
5201 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5202 {
5203 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5204
5205 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5206 /* We can only check sign-bit copies starting from the
5207 top-bit. In order to be able to check the bits we
5208 have already seen we pretend that subsequent bits
5209 have to be sign-bit copies too. */
5210 || num_sign_bit_copies_in_rep [in_mode][mode])
5211 num_sign_bit_copies_in_rep [in_mode][mode]
5212 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5213 }
5214 }
5215 }
5216
5217 /* Suppose that truncation from the machine mode of X to MODE is not a
5218 no-op. See if there is anything special about X so that we can
5219 assume it already contains a truncated value of MODE. */
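/* E.g. (illustrative, continuing the example above): truncating such an
   HImode register to QImode is safe here when the register is known to
   carry at least NUM_SIGN_BIT_COPIES_IN_REP[HImode][QImode] + 1 == 9
   sign bit copies. */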
5220
5221 bool
5222 truncated_to_mode (enum machine_mode mode, const_rtx x)
5223 {
5224 /* This register has already been used in MODE without explicit
5225 truncation. */
5226 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5227 return true;
5228
5229 /* See if we already satisfy the requirements of MODE. If yes we
5230 can just switch to MODE. */
5231 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5232 && (num_sign_bit_copies (x, GET_MODE (x))
5233 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5234 return true;
5235
5236 return false;
5237 }
5238 \f
5239 /* Initialize non_rtx_starting_operands, which is used to speed up
5240 for_each_rtx. */
5241 void
5242 init_rtlanal (void)
5243 {
5244 int i;
5245 for (i = 0; i < NUM_RTX_CODE; i++)
5246 {
5247 const char *format = GET_RTX_FORMAT (i);
5248 const char *first = strpbrk (format, "eEV");
5249 non_rtx_starting_operands[i] = first ? first - format : -1;
5250 }
5251
5252 init_num_sign_bit_copies_in_rep ();
5253 }
5254 \f
5255 /* Check whether this is a constant pool constant. */
5256 bool
5257 constant_pool_constant_p (rtx x)
5258 {
5259 x = avoid_constant_pool_reference (x);
5260 return CONST_DOUBLE_P (x);
5261 }
5262 \f
5263 /* If M is a bitmask that selects a field of low-order bits within an item but
5264 not the entire word, return the length of the field. Return -1 otherwise.
5265 M is used in machine mode MODE. */
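/* For instance (illustrative): M == 0x3f gives exact_log2 (0x40) == 6,
   while M == 0x30 gives exact_log2 (0x31) == -1, because the field
   does not start at bit 0. */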
5266
5267 int
5268 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5269 {
5270 if (mode != VOIDmode)
5271 {
5272 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5273 return -1;
5274 m &= GET_MODE_MASK (mode);
5275 }
5276
5277 return exact_log2 (m + 1);
5278 }
5279
5280 /* Return the mode of MEM's address. */
5281
5282 enum machine_mode
5283 get_address_mode (rtx mem)
5284 {
5285 enum machine_mode mode;
5286
5287 gcc_assert (MEM_P (mem));
5288 mode = GET_MODE (XEXP (mem, 0));
5289 if (mode != VOIDmode)
5290 return mode;
5291 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5292 }
5293 \f
5294 /* Split up a CONST_DOUBLE or integer constant rtx
5295 into two rtx's for single words,
5296 storing in *FIRST the word that comes first in memory in the target
5297 and in *SECOND the other. */
5298
5299 void
5300 split_double (rtx value, rtx *first, rtx *second)
5301 {
5302 if (CONST_INT_P (value))
5303 {
5304 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5305 {
5306 /* In this case the CONST_INT holds both target words.
5307 Extract the bits from it into two word-sized pieces.
5308 Sign extend each half to HOST_WIDE_INT. */
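/* Worked example (illustrative, assuming BITS_PER_WORD == 32 and a
   64-bit HOST_WIDE_INT): for value == 0x0000000180000000, the low
   word 0x80000000 has its sign bit set and is extended to
   0xffffffff80000000, while the high word is 0x00000001. */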
5309 unsigned HOST_WIDE_INT low, high;
5310 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5311 unsigned bits_per_word = BITS_PER_WORD;
5312
5313 /* Set sign_bit to the most significant bit of a word. */
5314 sign_bit = 1;
5315 sign_bit <<= bits_per_word - 1;
5316
5317 /* Set mask so that all bits of the word are set. We could
5318 have used 1 << BITS_PER_WORD instead of basing the
5319 calculation on sign_bit. However, on machines where
5320 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5321 compiler warning, even though the code would never be
5322 executed. */
5323 mask = sign_bit << 1;
5324 mask--;
5325
5326 /* Set sign_extend as any remaining bits. */
5327 sign_extend = ~mask;
5328
5329 /* Pick the lower word and sign-extend it. */
5330 low = INTVAL (value);
5331 low &= mask;
5332 if (low & sign_bit)
5333 low |= sign_extend;
5334
5335 /* Pick the higher word, shifted to the least significant
5336 bits, and sign-extend it. */
5337 high = INTVAL (value);
5338 high >>= bits_per_word - 1;
5339 high >>= 1;
5340 high &= mask;
5341 if (high & sign_bit)
5342 high |= sign_extend;
5343
5344 /* Store the words in the target machine order. */
5345 if (WORDS_BIG_ENDIAN)
5346 {
5347 *first = GEN_INT (high);
5348 *second = GEN_INT (low);
5349 }
5350 else
5351 {
5352 *first = GEN_INT (low);
5353 *second = GEN_INT (high);
5354 }
5355 }
5356 else
5357 {
5358 /* The rule for using CONST_INT for a wider mode
5359 is that we regard the value as signed.
5360 So sign-extend it. */
5361 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5362 if (WORDS_BIG_ENDIAN)
5363 {
5364 *first = high;
5365 *second = value;
5366 }
5367 else
5368 {
5369 *first = value;
5370 *second = high;
5371 }
5372 }
5373 }
5374 else if (!CONST_DOUBLE_P (value))
5375 {
5376 if (WORDS_BIG_ENDIAN)
5377 {
5378 *first = const0_rtx;
5379 *second = value;
5380 }
5381 else
5382 {
5383 *first = value;
5384 *second = const0_rtx;
5385 }
5386 }
5387 else if (GET_MODE (value) == VOIDmode
5388 /* This is the old way we did CONST_DOUBLE integers. */
5389 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5390 {
5391 /* In an integer, the words are defined as most and least significant.
5392 So order them by the target's convention. */
5393 if (WORDS_BIG_ENDIAN)
5394 {
5395 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5396 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5397 }
5398 else
5399 {
5400 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5401 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5402 }
5403 }
5404 else
5405 {
5406 REAL_VALUE_TYPE r;
5407 long l[2];
5408 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5409
5410 /* Note, this converts the REAL_VALUE_TYPE to the target's
5411 format, splits up the floating point double and outputs
5412 exactly 32 bits of it into each of l[0] and l[1] --
5413 not necessarily BITS_PER_WORD bits. */
5414 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5415
5416 /* If 32 bits is an entire word for the target, but not for the host,
5417 then sign-extend on the host so that the number will look the same
5418 way on the host that it would on the target. See for instance
5419 simplify_unary_operation. The #if is needed to avoid compiler
5420 warnings. */
5421
5422 #if HOST_BITS_PER_LONG > 32
5423 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5424 {
5425 if (l[0] & ((long) 1 << 31))
5426 l[0] |= ((long) (-1) << 32);
5427 if (l[1] & ((long) 1 << 31))
5428 l[1] |= ((long) (-1) << 32);
5429 }
5430 #endif
5431
5432 *first = GEN_INT (l[0]);
5433 *second = GEN_INT (l[1]);
5434 }
5435 }
5436
5437 /* Strip outer address "mutations" from LOC and return a pointer to the
5438 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5439 stripped expression there.
5440
5441 "Mutations" either convert between modes or apply some kind of
5442 alignment. */
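/* Illustrative example: for (and:SI (truncate:SI (reg:DI 1)) (const_int -4)),
   both the AND (an alignment to 4 bytes) and the TRUNCATE are stripped,
   a pointer to (reg:DI 1) is returned, and *OUTER_CODE, if supplied,
   ends up as TRUNCATE, the innermost mutation. */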
5443
5444 rtx *
5445 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5446 {
5447 for (;;)
5448 {
5449 enum rtx_code code = GET_CODE (*loc);
5450 if (GET_RTX_CLASS (code) == RTX_UNARY)
5451 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5452 used to convert between pointer sizes. */
5453 loc = &XEXP (*loc, 0);
5454 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5455 /* (and ... (const_int -X)) is used to align to X bytes. */
5456 loc = &XEXP (*loc, 0);
5457 else if (code == SUBREG
5458 && !OBJECT_P (SUBREG_REG (*loc))
5459 && subreg_lowpart_p (*loc))
5460 /* (subreg (operator ...) ...) inside an AND is used for mode
5461 conversion too. */
5462 loc = &SUBREG_REG (*loc);
5463 else
5464 return loc;
5465 if (outer_code)
5466 *outer_code = code;
5467 }
5468 }
5469
5470 /* Return true if X must be a base rather than an index. */
5471
5472 static bool
5473 must_be_base_p (rtx x)
5474 {
5475 return GET_CODE (x) == LO_SUM;
5476 }
5477
5478 /* Return true if X must be an index rather than a base. */
5479
5480 static bool
5481 must_be_index_p (rtx x)
5482 {
5483 return GET_CODE (x) == MULT || GET_CODE (x) == ASHIFT;
5484 }
5485
5486 /* Set the segment part of address INFO to LOC, given that INNER is the
5487 unmutated value. */
5488
5489 static void
5490 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5491 {
5492 gcc_checking_assert (GET_CODE (*inner) == UNSPEC);
5493
5494 gcc_assert (!info->segment);
5495 info->segment = loc;
5496 info->segment_term = inner;
5497 }
5498
5499 /* Set the base part of address INFO to LOC, given that INNER is the
5500 unmutated value. */
5501
5502 static void
5503 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5504 {
5505 if (GET_CODE (*inner) == LO_SUM)
5506 inner = strip_address_mutations (&XEXP (*inner, 0));
5507 gcc_checking_assert (REG_P (*inner)
5508 || MEM_P (*inner)
5509 || GET_CODE (*inner) == SUBREG);
5510
5511 gcc_assert (!info->base);
5512 info->base = loc;
5513 info->base_term = inner;
5514 }
5515
5516 /* Set the index part of address INFO to LOC, given that INNER is the
5517 unmutated value. */
5518
5519 static void
5520 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5521 {
5522 if ((GET_CODE (*inner) == MULT || GET_CODE (*inner) == ASHIFT)
5523 && CONSTANT_P (XEXP (*inner, 1)))
5524 inner = strip_address_mutations (&XEXP (*inner, 0));
5525 gcc_checking_assert (REG_P (*inner)
5526 || MEM_P (*inner)
5527 || GET_CODE (*inner) == SUBREG);
5528
5529 gcc_assert (!info->index);
5530 info->index = loc;
5531 info->index_term = inner;
5532 }
5533
5534 /* Set the displacement part of address INFO to LOC, given that INNER
5535 is the constant term. */
5536
5537 static void
5538 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5539 {
5540 gcc_checking_assert (CONSTANT_P (*inner));
5541
5542 gcc_assert (!info->disp);
5543 info->disp = loc;
5544 info->disp_term = inner;
5545 }
5546
5547 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5548 rest of INFO accordingly. */
5549
5550 static void
5551 decompose_incdec_address (struct address_info *info)
5552 {
5553 info->autoinc_p = true;
5554
5555 rtx *base = &XEXP (*info->inner, 0);
5556 set_address_base (info, base, base);
5557 gcc_checking_assert (info->base == info->base_term);
5558
5559 /* These addresses are only valid when the size of the addressed
5560 value is known. */
5561 gcc_checking_assert (info->mode != VOIDmode);
5562 }
5563
5564 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5565 of INFO accordingly. */
5566
5567 static void
5568 decompose_automod_address (struct address_info *info)
5569 {
5570 info->autoinc_p = true;
5571
5572 rtx *base = &XEXP (*info->inner, 0);
5573 set_address_base (info, base, base);
5574 gcc_checking_assert (info->base == info->base_term);
5575
5576 rtx plus = XEXP (*info->inner, 1);
5577 gcc_assert (GET_CODE (plus) == PLUS);
5578
5579 info->base_term2 = &XEXP (plus, 0);
5580 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5581
5582 rtx *step = &XEXP (plus, 1);
5583 rtx *inner_step = strip_address_mutations (step);
5584 if (CONSTANT_P (*inner_step))
5585 set_address_disp (info, step, inner_step);
5586 else
5587 set_address_index (info, step, inner_step);
5588 }
5589
5590 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5591 values in [PTR, END). Return a pointer to the end of the used array. */
5592
5593 static rtx **
5594 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5595 {
5596 rtx x = *loc;
5597 if (GET_CODE (x) == PLUS)
5598 {
5599 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5600 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5601 }
5602 else
5603 {
5604 gcc_assert (ptr != end);
5605 *ptr++ = loc;
5606 }
5607 return ptr;
5608 }
5609
5610 /* Evaluate the likelihood of X being a base or index value, returning
5611 positive if it is likely to be a base, negative if it is likely to be
5612 an index, and 0 if we can't tell. Make the magnitude of the return
5613 value reflect the amount of confidence we have in the answer.
5614
5615 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5616
5617 static int
5618 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5619 enum rtx_code outer_code, enum rtx_code index_code)
5620 {
5621 /* See whether we can be certain. */
5622 if (must_be_base_p (x))
5623 return 3;
5624 if (must_be_index_p (x))
5625 return -3;
5626
5627 /* Believe *_POINTER unless the address shape requires otherwise. */
5628 if (REG_P (x) && REG_POINTER (x))
5629 return 2;
5630 if (MEM_P (x) && MEM_POINTER (x))
5631 return 2;
5632
5633 if (REG_P (x) && HARD_REGISTER_P (x))
5634 {
5635 /* X is a hard register. If it only fits one of the base
5636 or index classes, choose that interpretation. */
5637 int regno = REGNO (x);
5638 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5639 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5640 if (base_p != index_p)
5641 return base_p ? 1 : -1;
5642 }
5643 return 0;
5644 }
5645
5646 /* INFO->INNER describes a normal, non-automodified address.
5647 Fill in the rest of INFO accordingly. */
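/* Illustrative example: for the address
   (plus:SI (plus:SI (reg:SI 1) (mult:SI (reg:SI 2) (const_int 4)))
            (const_int 8))
   the three summands become the base (reg:SI 1), the index
   (mult:SI (reg:SI 2) (const_int 4)) and the displacement
   (const_int 8). */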
5648
5649 static void
5650 decompose_normal_address (struct address_info *info)
5651 {
5652 /* Treat the address as the sum of up to four values. */
5653 rtx *ops[4];
5654 size_t n_ops = extract_plus_operands (info->inner, ops,
5655 ops + ARRAY_SIZE (ops)) - ops;
5656
5657 /* If there is more than one component, any base component is in a PLUS. */
5658 if (n_ops > 1)
5659 info->base_outer_code = PLUS;
5660
5661 /* Separate the parts that contain a REG or MEM from those that don't.
5662 Record the latter in INFO and leave the former in OPS. */
5663 rtx *inner_ops[4];
5664 size_t out = 0;
5665 for (size_t in = 0; in < n_ops; ++in)
5666 {
5667 rtx *loc = ops[in];
5668 rtx *inner = strip_address_mutations (loc);
5669 if (CONSTANT_P (*inner))
5670 set_address_disp (info, loc, inner);
5671 else if (GET_CODE (*inner) == UNSPEC)
5672 set_address_segment (info, loc, inner);
5673 else
5674 {
5675 ops[out] = loc;
5676 inner_ops[out] = inner;
5677 ++out;
5678 }
5679 }
5680
5681 /* Classify the remaining OPS members as bases and indexes. */
5682 if (out == 1)
5683 {
5684 /* Assume that the remaining value is a base unless the shape
5685 requires otherwise. */
5686 if (!must_be_index_p (*inner_ops[0]))
5687 set_address_base (info, ops[0], inner_ops[0]);
5688 else
5689 set_address_index (info, ops[0], inner_ops[0]);
5690 }
5691 else if (out == 2)
5692 {
5693 /* In the event of a tie, assume the base comes first. */
5694 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5695 GET_CODE (*ops[1]))
5696 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5697 GET_CODE (*ops[0])))
5698 {
5699 set_address_base (info, ops[0], inner_ops[0]);
5700 set_address_index (info, ops[1], inner_ops[1]);
5701 }
5702 else
5703 {
5704 set_address_base (info, ops[1], inner_ops[1]);
5705 set_address_index (info, ops[0], inner_ops[0]);
5706 }
5707 }
5708 else
5709 gcc_assert (out == 0);
5710 }
5711
5712 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5713 or VOIDmode if not known. AS is the address space associated with LOC.
5714 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
5715
5716 void
5717 decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
5718 addr_space_t as, enum rtx_code outer_code)
5719 {
5720 memset (info, 0, sizeof (*info));
5721 info->mode = mode;
5722 info->as = as;
5723 info->addr_outer_code = outer_code;
5724 info->outer = loc;
5725 info->inner = strip_address_mutations (loc, &outer_code);
5726 info->base_outer_code = outer_code;
5727 switch (GET_CODE (*info->inner))
5728 {
5729 case PRE_DEC:
5730 case PRE_INC:
5731 case POST_DEC:
5732 case POST_INC:
5733 decompose_incdec_address (info);
5734 break;
5735
5736 case PRE_MODIFY:
5737 case POST_MODIFY:
5738 decompose_automod_address (info);
5739 break;
5740
5741 default:
5742 decompose_normal_address (info);
5743 break;
5744 }
5745 }
5746
5747 /* Describe address operand LOC in INFO. */
5748
5749 void
5750 decompose_lea_address (struct address_info *info, rtx *loc)
5751 {
5752 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
5753 }
5754
5755 /* Describe the address of MEM X in INFO. */
5756
5757 void
5758 decompose_mem_address (struct address_info *info, rtx x)
5759 {
5760 gcc_assert (MEM_P (x));
5761 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
5762 MEM_ADDR_SPACE (x), MEM);
5763 }
5764
5765 /* Update INFO after a change to the address it describes. */
5766
5767 void
5768 update_address (struct address_info *info)
5769 {
5770 decompose_address (info, info->outer, info->mode, info->as,
5771 info->addr_outer_code);
5772 }
5773
5774 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
5775 more complicated than that. */
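/* E.g. (illustrative): an index of (mult:SI (reg:SI 2) (const_int 4))
   yields 4, (ashift:SI (reg:SI 2) (const_int 2)) also yields 4, and a
   bare (reg:SI 2) yields 1. */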
5776
5777 HOST_WIDE_INT
5778 get_index_scale (const struct address_info *info)
5779 {
5780 rtx index = *info->index;
5781 if (GET_CODE (index) == MULT
5782 && CONST_INT_P (XEXP (index, 1))
5783 && info->index_term == &XEXP (index, 0))
5784 return INTVAL (XEXP (index, 1));
5785
5786 if (GET_CODE (index) == ASHIFT
5787 && CONST_INT_P (XEXP (index, 1))
5788 && info->index_term == &XEXP (index, 0))
5789 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
5790
5791 if (info->index == info->index_term)
5792 return 1;
5793
5794 return 0;
5795 }
5796
5797 /* Return the "index code" of INFO, in the form required by
5798 ok_for_base_p_1. */
5799
5800 enum rtx_code
5801 get_index_code (const struct address_info *info)
5802 {
5803 if (info->index)
5804 return GET_CODE (*info->index);
5805
5806 if (info->disp)
5807 return GET_CODE (*info->disp);
5808
5809 return SCRATCH;
5810 }