/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "tree.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);
static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
\f
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
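
/* Illustrative sketch of the difference between rtx_unstable_p and
   rtx_varies_p.  Not part of the build (hence the #if 0 guard); the
   RTXes built here are hypothetical examples, not taken from any port.  */
#if 0
static void
example_stability_checks (void)
{
  /* The frame pointer is stable within a function...  */
  gcc_assert (!rtx_unstable_p (frame_pointer_rtx));

  /* ...and so is a constant offset from it.  */
  rtx addr = gen_rtx_PLUS (Pmode, frame_pointer_rtx, GEN_INT (8));
  gcc_assert (!rtx_unstable_p (addr));

  /* A writable MEM is unstable, and its value can also vary between
     two executions of the program.  */
  rtx mem = gen_rtx_MEM (SImode, addr);
  gcc_assert (rtx_unstable_p (mem));
  gcc_assert (rtx_varies_p (mem, false));
}
#endif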

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
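
/* A small sketch of rtx_addr_can_trap_p on hand-built addresses.
   Hypothetical, compile-excluded example; real callers pass the address
   taken from a MEM.  */
#if 0
static void
example_addr_trap_checks (void)
{
  /* The stack pointer itself is assumed not to trap.  */
  gcc_assert (!rtx_addr_can_trap_p (stack_pointer_rtx));

  /* A weak symbol may resolve to a null address, so it can trap.  */
  rtx weak = gen_rtx_SYMBOL_REF (Pmode, "maybe_undefined");
  SYMBOL_REF_WEAK (weak) = 1;
  gcc_assert (rtx_addr_can_trap_p (weak));

  /* An arbitrary pseudo register falls through to the default case
     and is conservatively assumed to trap.  */
  gcc_assert (rtx_addr_can_trap_p (gen_rtx_REG (Pmode, 10000)));
}
#endif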

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
\f
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
\f
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
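
/* Worked example for get_integer_term and get_related_value: for
   (const (plus (symbol_ref "x") (const_int 8))) the integer term is 8
   and the related value is the SYMBOL_REF.  Hypothetical sketch,
   excluded from the build.  */
#if 0
static void
example_integer_term (void)
{
  rtx sym = gen_rtx_SYMBOL_REF (Pmode, "x");
  rtx c = gen_rtx_CONST (Pmode,
                         gen_rtx_PLUS (Pmode, sym, GEN_INT (8)));

  gcc_assert (get_integer_term (c) == 8);
  gcc_assert (get_related_value (c) == sym);
}
#endif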
\f
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
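
/* Usage sketch for split_const: it never fails, so callers can rely on
   *BASE_OUT and *OFFSET_OUT always being set.  Hypothetical example.  */
#if 0
static void
example_split_const (rtx x)
{
  rtx base, offset;

  split_const (x, &base, &offset);
  /* For (const (plus (symbol_ref "x") (const_int 8))), BASE is the
     SYMBOL_REF and OFFSET is (const_int 8); for anything else BASE is
     X itself and OFFSET is const0_rtx.  Either way OFFSET is a
     CONST_INT.  */
  gcc_assert (CONST_INT_P (offset));
}
#endif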
\f
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

\f
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_UNSIGNED_P (op))
    return true;

  return false;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }
  return 0;
}
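
/* Sketch of reg_mentioned_p: registers compare by number, so two
   distinct REG rtxes for the same register still match.  Hypothetical,
   compile-excluded example.  */
#if 0
static void
example_reg_mentioned (void)
{
  rtx r0 = gen_rtx_REG (SImode, 0);
  rtx sum = gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 0),
                          gen_rtx_REG (SImode, 1));

  /* R0 is not pointer-equal to either operand of SUM, but the
     REG-by-number comparison above still finds it.  */
  gcc_assert (reg_mentioned_p (r0, sum));
}
#endif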
\f
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  Return nonzero if REG is set or
   clobbered in INSN (including implicitly, by a call).  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
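
/* Sketch showing how reg_set_p complements reg_used_between_p: a call
   insn "sets" every register invalidated by the call, even though no
   explicit SET for it appears in the pattern.  Hypothetical example;
   REG and the insns come from the caller.  */
#if 0
static bool
example_value_survives_p (rtx reg, rtx from_insn, rtx to_insn)
{
  /* REG still holds its value at TO_INSN only if no insn in between
     sets or clobbers it (including implicitly, via calls).  */
  return !reg_set_between_p (reg, from_insn, to_insn);
}
#endif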

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;
      break;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;
      break;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
\f
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
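
/* Usage sketch for set_of: unlike reg_set_p it hands back the actual
   SET or CLOBBER rtx, which lets the caller distinguish the two.
   Hypothetical example.  */
#if 0
static bool
example_insn_sets_reg_p (rtx reg, rtx insn)
{
  const_rtx set = set_of (reg, insn);

  /* NULL_RTX: REG is not modified at all; a CLOBBER would mean it is
     modified but yields no usable value.  */
  return set != NULL_RTX && GET_CODE (set) == SET;
}
#endif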

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
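
/* Sketch of find_all_hard_reg_sets: collect the hard registers written
   by INSN and test one of them.  Post-reload only, per the comment
   above.  Hypothetical example.  */
#if 0
static bool
example_insn_writes_hard_reg_p (rtx insn, unsigned int regno)
{
  HARD_REG_SET written;

  find_all_hard_reg_sets (insn, &written, true);
  return TEST_HARD_REG_BIT (written, regno);
}
#endif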

/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *) data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose outputs
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common
                 case only a single set is present in the pattern, so we
                 want to avoid checking for REG_UNUSED notes unless
                 necessary.

                 When we reach a set the first time, we just expect it to
                 be the single set we are looking for; only when more
                 sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
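
/* The usual entry point is single_set (defined in rtl.h), which handles
   a plain SET inline and falls back to this function for PARALLEL
   patterns.  A typical use, sketched hypothetically: pick out the one
   live SET of an insn, if any.  */
#if 0
static bool
example_is_reg_to_reg_move_p (rtx insn)
{
  rtx set = single_set (insn);

  return (set != NULL_RTX
          && REG_P (SET_DEST (set))
          && REG_P (SET_SRC (set)));
}
#endif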

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
\f
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
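
/* Sketch: a register-to-itself move is a no-op by the rules above.
   Hypothetical, compile-excluded example, using the three-operand
   gen_rtx_SET of this vintage.  */
#if 0
static void
example_noop_set (void)
{
  rtx r1 = gen_rtx_REG (SImode, 1);
  rtx set = gen_rtx_SET (VOIDmode, r1, r1);

  gcc_assert (set_noop_p (set));
}
#endif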
\f

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
\f
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note are always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we test IN; for X we can profitably combine the
     CONSTANT_P test with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
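
/* Sketch of the overlap semantics: a multi-word hard register overlaps
   a reference to any of its constituent words.  Assumes a hypothetical
   target where (reg:DI 0) occupies hard regs 0 and 1; compile-excluded
   example.  */
#if 0
static void
example_overlap (void)
{
  rtx di_reg = gen_rtx_REG (DImode, 0);
  rtx si_reg = gen_rtx_REG (SImode, 1);

  /* reg_mentioned_p compares register numbers only (0 vs 1 here), so
     it misses the overlap; reg_overlap_mentioned_p catches the shared
     word.  */
  gcc_assert (reg_overlap_mentioned_p (di_reg, si_reg));
}
#endif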
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
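
/* Typical note_stores usage: walk an insn pattern with a callback and
   accumulate something about each store.  This mirrors how
   record_hard_reg_sets above is driven; hypothetical sketch.  */
#if 0
static void
count_stores_cb (rtx dest ATTRIBUTE_UNUSED, const_rtx pat ATTRIBUTE_UNUSED,
                 void *data)
{
  /* Called once per stored-into REG or MEM.  */
  (*(int *) data)++;
}

static int
example_count_stores (rtx insn)
{
  int n = 0;
  note_stores (PATTERN (insn), count_stores_cb, &n);
  return n;
}
#endif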
\f
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
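
/* Matching sketch for note_uses: FUN receives a pointer to each used
   expression, so the callback may replace the expression in place.
   Hypothetical example.  */
#if 0
static void
touch_use_cb (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  /* *LOC may be rewritten here, e.g. *loc = copy_rtx (*loc);  */
}

static void
example_walk_uses (rtx insn)
{
  note_uses (&PATTERN (insn), touch_use_cb, NULL);
}
#endif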
\f
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
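
/* Common find_reg_note idiom: fetch the value attached to a REG_EQUAL
   note, if the insn has one.  Hypothetical sketch.  */
#if 0
static rtx
example_reg_equal_value (rtx insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);

  /* The note itself is the list cell; its datum is operand 0.  */
  return note ? XEXP (note, 0) : NULL_RTX;
}
#endif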

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}

\f
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}

/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
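
/* Sketch of attaching an equivalence note: record that the insn's
   destination is known to equal VAL.  Real callers usually go through
   set_unique_reg_note instead, which avoids duplicates; hypothetical
   example.  */
#if 0
static void
example_record_equivalence (rtx insn, rtx val)
{
  add_reg_note (insn, REG_EQUAL, copy_rtx (val));
}
#endif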

/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
                                       datum, REG_NOTES (insn));
}

/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}

/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
\f
2160 /* Nonzero if X contains any volatile instructions. These are instructions
2161 which may cause unpredictable machine state, and thus no
2162 instructions or register uses should be moved or combined across them.
2163 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2164
2165 int
2166 volatile_insn_p (const_rtx x)
2167 {
2168 const RTX_CODE code = GET_CODE (x);
2169 switch (code)
2170 {
2171 case LABEL_REF:
2172 case SYMBOL_REF:
2173 case CONST:
2174 CASE_CONST_ANY:
2175 case CC0:
2176 case PC:
2177 case REG:
2178 case SCRATCH:
2179 case CLOBBER:
2180 case ADDR_VEC:
2181 case ADDR_DIFF_VEC:
2182 case CALL:
2183 case MEM:
2184 return 0;
2185
2186 case UNSPEC_VOLATILE:
2187 return 1;
2188
2189 case ASM_INPUT:
2190 case ASM_OPERANDS:
2191 if (MEM_VOLATILE_P (x))
2192 return 1;
2193
2194 default:
2195 break;
2196 }
2197
2198 /* Recursively scan the operands of this expression. */
2199
2200 {
2201 const char *const fmt = GET_RTX_FORMAT (code);
2202 int i;
2203
2204 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2205 {
2206 if (fmt[i] == 'e')
2207 {
2208 if (volatile_insn_p (XEXP (x, i)))
2209 return 1;
2210 }
2211 else if (fmt[i] == 'E')
2212 {
2213 int j;
2214 for (j = 0; j < XVECLEN (x, i); j++)
2215 if (volatile_insn_p (XVECEXP (x, i, j)))
2216 return 1;
2217 }
2218 }
2219 }
2220 return 0;
2221 }
2222
2223 /* Nonzero if X contains any volatile memory references,
2224 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2225
2226 int
2227 volatile_refs_p (const_rtx x)
2228 {
2229 const RTX_CODE code = GET_CODE (x);
2230 switch (code)
2231 {
2232 case LABEL_REF:
2233 case SYMBOL_REF:
2234 case CONST:
2235 CASE_CONST_ANY:
2236 case CC0:
2237 case PC:
2238 case REG:
2239 case SCRATCH:
2240 case CLOBBER:
2241 case ADDR_VEC:
2242 case ADDR_DIFF_VEC:
2243 return 0;
2244
2245 case UNSPEC_VOLATILE:
2246 return 1;
2247
2248 case MEM:
2249 case ASM_INPUT:
2250 case ASM_OPERANDS:
2251 if (MEM_VOLATILE_P (x))
2252 return 1;
2253
2254 default:
2255 break;
2256 }
2257
2258 /* Recursively scan the operands of this expression. */
2259
2260 {
2261 const char *const fmt = GET_RTX_FORMAT (code);
2262 int i;
2263
2264 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2265 {
2266 if (fmt[i] == 'e')
2267 {
2268 if (volatile_refs_p (XEXP (x, i)))
2269 return 1;
2270 }
2271 else if (fmt[i] == 'E')
2272 {
2273 int j;
2274 for (j = 0; j < XVECLEN (x, i); j++)
2275 if (volatile_refs_p (XVECEXP (x, i, j)))
2276 return 1;
2277 }
2278 }
2279 }
2280 return 0;
2281 }
2282
2283 /* Similar to above, except that it also rejects register pre- and post-
2284 incrementing. */
2285
2286 int
2287 side_effects_p (const_rtx x)
2288 {
2289 const RTX_CODE code = GET_CODE (x);
2290 switch (code)
2291 {
2292 case LABEL_REF:
2293 case SYMBOL_REF:
2294 case CONST:
2295 CASE_CONST_ANY:
2296 case CC0:
2297 case PC:
2298 case REG:
2299 case SCRATCH:
2300 case ADDR_VEC:
2301 case ADDR_DIFF_VEC:
2302 case VAR_LOCATION:
2303 return 0;
2304
2305 case CLOBBER:
2306 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2307 when some combination can't be done. If we see one, don't think
2308 that we can simplify the expression. */
2309 return (GET_MODE (x) != VOIDmode);
2310
2311 case PRE_INC:
2312 case PRE_DEC:
2313 case POST_INC:
2314 case POST_DEC:
2315 case PRE_MODIFY:
2316 case POST_MODIFY:
2317 case CALL:
2318 case UNSPEC_VOLATILE:
2319 return 1;
2320
2321 case MEM:
2322 case ASM_INPUT:
2323 case ASM_OPERANDS:
2324 if (MEM_VOLATILE_P (x))
2325 return 1;
2326
2327 default:
2328 break;
2329 }
2330
2331 /* Recursively scan the operands of this expression. */
2332
2333 {
2334 const char *fmt = GET_RTX_FORMAT (code);
2335 int i;
2336
2337 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2338 {
2339 if (fmt[i] == 'e')
2340 {
2341 if (side_effects_p (XEXP (x, i)))
2342 return 1;
2343 }
2344 else if (fmt[i] == 'E')
2345 {
2346 int j;
2347 for (j = 0; j < XVECLEN (x, i); j++)
2348 if (side_effects_p (XVECEXP (x, i, j)))
2349 return 1;
2350 }
2351 }
2352 }
2353 return 0;
2354 }
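/* To illustrate how the three predicates above differ, consider a
   volatile load through a post-incremented pointer (a hypothetical
   pattern):

     (set (reg:SI r) (mem/v:SI (post_inc:SI (reg:SI p))))

   volatile_insn_p is false, since there is no volatile asm or
   UNSPEC_VOLATILE; volatile_refs_p is true because of the volatile MEM;
   and side_effects_p is true on two counts, the volatile MEM and the
   POST_INC.  */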
2355 \f
2356 /* Return nonzero if evaluating rtx X might cause a trap.
2357 FLAGS controls how to consider MEMs. A nonzero value means the context
2358 of the access may have changed from the original, such that the
2359 address may have become invalid. */
2360
2361 int
2362 may_trap_p_1 (const_rtx x, unsigned flags)
2363 {
2364 int i;
2365 enum rtx_code code;
2366 const char *fmt;
2367
2368 /* We make no distinction currently, but this function is part of
2369 the internal target-hooks ABI so we keep the parameter as
2370 "unsigned flags". */
2371 bool code_changed = flags != 0;
2372
2373 if (x == 0)
2374 return 0;
2375 code = GET_CODE (x);
2376 switch (code)
2377 {
2378 /* Handle these cases quickly. */
2379 CASE_CONST_ANY:
2380 case SYMBOL_REF:
2381 case LABEL_REF:
2382 case CONST:
2383 case PC:
2384 case CC0:
2385 case REG:
2386 case SCRATCH:
2387 return 0;
2388
2389 case UNSPEC:
2390 return targetm.unspec_may_trap_p (x, flags);
2391
2392 case UNSPEC_VOLATILE:
2393 case ASM_INPUT:
2394 case TRAP_IF:
2395 return 1;
2396
2397 case ASM_OPERANDS:
2398 return MEM_VOLATILE_P (x);
2399
2400 /* Memory ref can trap unless it's a static var or a stack slot. */
2401 case MEM:
2402 /* Recognize specific pattern of stack checking probes. */
2403 if (flag_stack_check
2404 && MEM_VOLATILE_P (x)
2405 && XEXP (x, 0) == stack_pointer_rtx)
2406 return 1;
2407 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2408 reference; moving it out of context such as when moving code
2409 when optimizing, might cause its address to become invalid. */
2410 code_changed
2411 || !MEM_NOTRAP_P (x))
2412 {
2413 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2414 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2415 GET_MODE (x), code_changed);
2416 }
2417
2418 return 0;
2419
2420 /* Division by a non-constant might trap. */
2421 case DIV:
2422 case MOD:
2423 case UDIV:
2424 case UMOD:
2425 if (HONOR_SNANS (GET_MODE (x)))
2426 return 1;
2427 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2428 return flag_trapping_math;
2429 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2430 return 1;
2431 break;
2432
2433 case EXPR_LIST:
2434 /* An EXPR_LIST is used to represent a function call. This
2435 certainly may trap. */
2436 return 1;
2437
2438 case GE:
2439 case GT:
2440 case LE:
2441 case LT:
2442 case LTGT:
2443 case COMPARE:
2444 /* Some floating point comparisons may trap. */
2445 if (!flag_trapping_math)
2446 break;
2447 /* ??? There is no machine independent way to check for tests that trap
2448 when COMPARE is used, though many targets do make this distinction.
2449 For instance, sparc uses CCFPE for compares which generate exceptions
2450 and CCFP for compares which do not generate exceptions. */
2451 if (HONOR_NANS (GET_MODE (x)))
2452 return 1;
2453 /* But often the compare has some CC mode, so check operand
2454 modes as well. */
2455 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2456 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2457 return 1;
2458 break;
2459
2460 case EQ:
2461 case NE:
2462 if (HONOR_SNANS (GET_MODE (x)))
2463 return 1;
2464 /* Often comparison is CC mode, so check operand modes. */
2465 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2466 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2467 return 1;
2468 break;
2469
2470 case FIX:
2471 /* Conversion of floating point might trap. */
2472 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2473 return 1;
2474 break;
2475
2476 case NEG:
2477 case ABS:
2478 case SUBREG:
2479 /* These operations don't trap even with floating point. */
2480 break;
2481
2482 default:
2483 /* Any floating arithmetic may trap. */
2484 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2485 return 1;
2486 }
2487
2488 fmt = GET_RTX_FORMAT (code);
2489 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2490 {
2491 if (fmt[i] == 'e')
2492 {
2493 if (may_trap_p_1 (XEXP (x, i), flags))
2494 return 1;
2495 }
2496 else if (fmt[i] == 'E')
2497 {
2498 int j;
2499 for (j = 0; j < XVECLEN (x, i); j++)
2500 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2501 return 1;
2502 }
2503 }
2504 return 0;
2505 }
2506
2507 /* Return nonzero if evaluating rtx X might cause a trap. */
2508
2509 int
2510 may_trap_p (const_rtx x)
2511 {
2512 return may_trap_p_1 (x, 0);
2513 }
2514
2515 /* Same as above, but additionally return nonzero if evaluating rtx X might
2516 cause a fault. We define a fault for the purpose of this function as an
2517 erroneous execution condition that cannot be encountered during the normal
2518 execution of a valid program; the typical example is an unaligned memory
2519 access on a strict alignment machine. The compiler guarantees that it
2520 doesn't generate code that will fault from a valid program, but this
2521 guarantee doesn't mean anything for individual instructions. Consider
2522 the following example:
2523
2524 struct S { int d; union { char *cp; int *ip; }; };
2525
2526 int foo(struct S *s)
2527 {
2528 if (s->d == 1)
2529 return *s->ip;
2530 else
2531 return *s->cp;
2532 }
2533
2534 on a strict alignment machine. In a valid program, foo will never be
2535 invoked on a structure for which d is equal to 1 and the underlying
2536 unique field of the union not aligned on a 4-byte boundary, but the
2537 expression *s->ip might cause a fault if considered individually.
2538
2539 At the RTL level, potentially problematic expressions will almost always
2540 satisfy may_trap_p; for example, the above dereference can be emitted as
2541 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2542 However, suppose that foo is inlined in a caller that causes s->cp to
2543 point to a local character variable and guarantees that s->d is not set
2544 to 1; foo may have been effectively translated into pseudo-RTL as:
2545
2546 if ((reg:SI) == 1)
2547 (set (reg:SI) (mem:SI (%fp - 7)))
2548 else
2549 (set (reg:QI) (mem:QI (%fp - 7)))
2550
2551 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2552 memory reference to a stack slot, but it will certainly cause a fault
2553 on a strict alignment machine. */
2554
2555 int
2556 may_trap_or_fault_p (const_rtx x)
2557 {
2558 return may_trap_p_1 (x, 1);
2559 }
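/* As a concrete illustration (with hypothetical rtxen), the division

     (div:SI (reg:SI a) (reg:SI b))

   is may_trap_p, because the divisor might be zero at run time, while

     (div:SI (reg:SI a) (const_int 4))

   is not.  may_trap_or_fault_p performs the same walk with FLAGS
   nonzero, so the address of a MEM is examined even when MEM_NOTRAP_P
   is set on it.  */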
2560 \f
2561 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2562 i.e., an inequality. */
2563
2564 int
2565 inequality_comparisons_p (const_rtx x)
2566 {
2567 const char *fmt;
2568 int len, i;
2569 const enum rtx_code code = GET_CODE (x);
2570
2571 switch (code)
2572 {
2573 case REG:
2574 case SCRATCH:
2575 case PC:
2576 case CC0:
2577 CASE_CONST_ANY:
2578 case CONST:
2579 case LABEL_REF:
2580 case SYMBOL_REF:
2581 return 0;
2582
2583 case LT:
2584 case LTU:
2585 case GT:
2586 case GTU:
2587 case LE:
2588 case LEU:
2589 case GE:
2590 case GEU:
2591 return 1;
2592
2593 default:
2594 break;
2595 }
2596
2597 len = GET_RTX_LENGTH (code);
2598 fmt = GET_RTX_FORMAT (code);
2599
2600 for (i = 0; i < len; i++)
2601 {
2602 if (fmt[i] == 'e')
2603 {
2604 if (inequality_comparisons_p (XEXP (x, i)))
2605 return 1;
2606 }
2607 else if (fmt[i] == 'E')
2608 {
2609 int j;
2610 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2611 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2612 return 1;
2613 }
2614 }
2615
2616 return 0;
2617 }
2618 \f
2619 /* Replace any occurrence of FROM in X with TO. The function does
2620 not descend into CONST_DOUBLE for the replace.
2621
2622 Note that copying is not done so X must not be shared unless all copies
2623 are to be modified. */
2624
2625 rtx
2626 replace_rtx (rtx x, rtx from, rtx to)
2627 {
2628 int i, j;
2629 const char *fmt;
2630
2631 if (x == from)
2632 return to;
2633
2634 /* Allow this function to make replacements in EXPR_LISTs. */
2635 if (x == 0)
2636 return 0;
2637
2638 if (GET_CODE (x) == SUBREG)
2639 {
2640 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2641
2642 if (CONST_INT_P (new_rtx))
2643 {
2644 x = simplify_subreg (GET_MODE (x), new_rtx,
2645 GET_MODE (SUBREG_REG (x)),
2646 SUBREG_BYTE (x));
2647 gcc_assert (x);
2648 }
2649 else
2650 SUBREG_REG (x) = new_rtx;
2651
2652 return x;
2653 }
2654 else if (GET_CODE (x) == ZERO_EXTEND)
2655 {
2656 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2657
2658 if (CONST_INT_P (new_rtx))
2659 {
2660 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2661 new_rtx, GET_MODE (XEXP (x, 0)));
2662 gcc_assert (x);
2663 }
2664 else
2665 XEXP (x, 0) = new_rtx;
2666
2667 return x;
2668 }
2669
2670 fmt = GET_RTX_FORMAT (GET_CODE (x));
2671 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2672 {
2673 if (fmt[i] == 'e')
2674 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2675 else if (fmt[i] == 'E')
2676 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2677 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2678 }
2679
2680 return x;
2681 }
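/* A short usage sketch (INSN, OLD_REG and NEW_REG are hypothetical).
   Since replace_rtx modifies X in place, unshare the pattern first, and
   reassign the result in case the outermost rtx itself matched FROM:

     PATTERN (insn) = copy_rtx (PATTERN (insn));
     PATTERN (insn) = replace_rtx (PATTERN (insn), old_reg, new_reg);  */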
2682 \f
2683 /* Replace occurrences of the old label in *X with the new one.
2684 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2685
2686 int
2687 replace_label (rtx *x, void *data)
2688 {
2689 rtx l = *x;
2690 rtx old_label = ((replace_label_data *) data)->r1;
2691 rtx new_label = ((replace_label_data *) data)->r2;
2692 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2693
2694 if (l == NULL_RTX)
2695 return 0;
2696
2697 if (GET_CODE (l) == SYMBOL_REF
2698 && CONSTANT_POOL_ADDRESS_P (l))
2699 {
2700 rtx c = get_pool_constant (l);
2701 if (rtx_referenced_p (old_label, c))
2702 {
2703 rtx new_c, new_l;
2704 replace_label_data *d = (replace_label_data *) data;
2705
2706 /* Create a copy of constant C; replace the label inside
2707 but do not update LABEL_NUSES because uses in constant pool
2708 are not counted. */
2709 new_c = copy_rtx (c);
2710 d->update_label_nuses = false;
2711 for_each_rtx (&new_c, replace_label, data);
2712 d->update_label_nuses = update_label_nuses;
2713
2714 /* Add the new constant NEW_C to constant pool and replace
2715 the old reference to constant by new reference. */
2716 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2717 *x = replace_rtx (l, l, new_l);
2718 }
2719 return 0;
2720 }
2721
2722 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2723 field. This is not handled by for_each_rtx because it doesn't
2724 handle unprinted ('0') fields. */
2725 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2726 JUMP_LABEL (l) = new_label;
2727
2728 if ((GET_CODE (l) == LABEL_REF
2729 || GET_CODE (l) == INSN_LIST)
2730 && XEXP (l, 0) == old_label)
2731 {
2732 XEXP (l, 0) = new_label;
2733 if (update_label_nuses)
2734 {
2735 ++LABEL_NUSES (new_label);
2736 --LABEL_NUSES (old_label);
2737 }
2738 return 0;
2739 }
2740
2741 return 0;
2742 }
2743
2744 /* When *BODY is equal to X or X is directly referenced by *BODY,
2745 return nonzero, so that FOR_EACH_RTX stops traversing and returns nonzero
2746 too; otherwise FOR_EACH_RTX continues traversing *BODY. */
2747
2748 static int
2749 rtx_referenced_p_1 (rtx *body, void *x)
2750 {
2751 rtx y = (rtx) x;
2752
2753 if (*body == NULL_RTX)
2754 return y == NULL_RTX;
2755
2756 /* Return true if a label_ref *BODY refers to label Y. */
2757 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2758 return XEXP (*body, 0) == y;
2759
2760 /* If *BODY is a reference to a pool constant, traverse the constant. */
2761 if (GET_CODE (*body) == SYMBOL_REF
2762 && CONSTANT_POOL_ADDRESS_P (*body))
2763 return rtx_referenced_p (y, get_pool_constant (*body));
2764
2765 /* By default, compare the RTL expressions. */
2766 return rtx_equal_p (*body, y);
2767 }
2768
2769 /* Return true if X is referenced in BODY. */
2770
2771 int
2772 rtx_referenced_p (rtx x, rtx body)
2773 {
2774 return for_each_rtx (&body, rtx_referenced_p_1, x);
2775 }
2776
2777 /* If INSN is a tablejump, return true and store the label (which precedes
2778 the jump table) in *LABELP and the jump table in *TABLEP. LABELP and TABLEP may be NULL. */
2779
2780 bool
2781 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2782 {
2783 rtx label, table;
2784
2785 if (!JUMP_P (insn))
2786 return false;
2787
2788 label = JUMP_LABEL (insn);
2789 if (label != NULL_RTX && !ANY_RETURN_P (label)
2790 && (table = NEXT_INSN (label)) != NULL_RTX
2791 && JUMP_TABLE_DATA_P (table))
2792 {
2793 if (labelp)
2794 *labelp = label;
2795 if (tablep)
2796 *tablep = table;
2797 return true;
2798 }
2799 return false;
2800 }
2801
2802 /* A subroutine of computed_jump_p. Return 1 if X contains a REG or MEM or
2803 constant that is not in the constant pool and not in the condition
2804 of an IF_THEN_ELSE. */
2805
2806 static int
2807 computed_jump_p_1 (const_rtx x)
2808 {
2809 const enum rtx_code code = GET_CODE (x);
2810 int i, j;
2811 const char *fmt;
2812
2813 switch (code)
2814 {
2815 case LABEL_REF:
2816 case PC:
2817 return 0;
2818
2819 case CONST:
2820 CASE_CONST_ANY:
2821 case SYMBOL_REF:
2822 case REG:
2823 return 1;
2824
2825 case MEM:
2826 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2827 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2828
2829 case IF_THEN_ELSE:
2830 return (computed_jump_p_1 (XEXP (x, 1))
2831 || computed_jump_p_1 (XEXP (x, 2)));
2832
2833 default:
2834 break;
2835 }
2836
2837 fmt = GET_RTX_FORMAT (code);
2838 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2839 {
2840 if (fmt[i] == 'e'
2841 && computed_jump_p_1 (XEXP (x, i)))
2842 return 1;
2843
2844 else if (fmt[i] == 'E')
2845 for (j = 0; j < XVECLEN (x, i); j++)
2846 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2847 return 1;
2848 }
2849
2850 return 0;
2851 }
2852
2853 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2854
2855 Tablejumps and casesi insns are not considered indirect jumps;
2856 we can recognize them by a (use (label_ref)). */
2857
2858 int
2859 computed_jump_p (const_rtx insn)
2860 {
2861 int i;
2862 if (JUMP_P (insn))
2863 {
2864 rtx pat = PATTERN (insn);
2865
2866 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2867 if (JUMP_LABEL (insn) != NULL)
2868 return 0;
2869
2870 if (GET_CODE (pat) == PARALLEL)
2871 {
2872 int len = XVECLEN (pat, 0);
2873 int has_use_labelref = 0;
2874
2875 for (i = len - 1; i >= 0; i--)
2876 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2877 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2878 == LABEL_REF))
2879 {
2880 has_use_labelref = 1;
2881 break;
2882 }
2883
2884 if (! has_use_labelref)
2885 for (i = len - 1; i >= 0; i--)
2886 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2887 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2888 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2889 return 1;
2890 }
2891 else if (GET_CODE (pat) == SET
2892 && SET_DEST (pat) == pc_rtx
2893 && computed_jump_p_1 (SET_SRC (pat)))
2894 return 1;
2895 }
2896 return 0;
2897 }
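/* For instance, on a 32-bit target a jump through a register with no
   JUMP_LABEL set (a hypothetical pattern)

     (set (pc) (reg:SI r))

   is a computed jump, whereas a tablejump's PARALLEL carries a
   (use (label_ref ...)) and is rejected by the loop above.  */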
2898
2899 /* Optimized inner loop of for_each_rtx, trying to avoid useless recursive
2900 calls. Processes the subexpressions of EXP and passes them to F. */
2901 static int
2902 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2903 {
2904 int result, i, j;
2905 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2906 rtx *x;
2907
2908 for (; format[n] != '\0'; n++)
2909 {
2910 switch (format[n])
2911 {
2912 case 'e':
2913 /* Call F on X. */
2914 x = &XEXP (exp, n);
2915 result = (*f) (x, data);
2916 if (result == -1)
2917 /* Do not traverse sub-expressions. */
2918 continue;
2919 else if (result != 0)
2920 /* Stop the traversal. */
2921 return result;
2922
2923 if (*x == NULL_RTX)
2924 /* There are no sub-expressions. */
2925 continue;
2926
2927 i = non_rtx_starting_operands[GET_CODE (*x)];
2928 if (i >= 0)
2929 {
2930 result = for_each_rtx_1 (*x, i, f, data);
2931 if (result != 0)
2932 return result;
2933 }
2934 break;
2935
2936 case 'V':
2937 case 'E':
2938 if (XVEC (exp, n) == 0)
2939 continue;
2940 for (j = 0; j < XVECLEN (exp, n); ++j)
2941 {
2942 /* Call F on X. */
2943 x = &XVECEXP (exp, n, j);
2944 result = (*f) (x, data);
2945 if (result == -1)
2946 /* Do not traverse sub-expressions. */
2947 continue;
2948 else if (result != 0)
2949 /* Stop the traversal. */
2950 return result;
2951
2952 if (*x == NULL_RTX)
2953 /* There are no sub-expressions. */
2954 continue;
2955
2956 i = non_rtx_starting_operands[GET_CODE (*x)];
2957 if (i >= 0)
2958 {
2959 result = for_each_rtx_1 (*x, i, f, data);
2960 if (result != 0)
2961 return result;
2962 }
2963 }
2964 break;
2965
2966 default:
2967 /* Nothing to do. */
2968 break;
2969 }
2970 }
2971
2972 return 0;
2973 }
2974
2975 /* Traverse X via depth-first search, calling F for each
2976 sub-expression (including X itself). F is also passed the DATA.
2977 If F returns -1, do not traverse sub-expressions, but continue
2978 traversing the rest of the tree. If F ever returns any other
2979 nonzero value, stop the traversal, and return the value returned
2980 by F. Otherwise, return 0. This function does not traverse inside
2981 tree structure that contains RTX_EXPRs, or into sub-expressions
2982 whose format code is `0' since it is not known whether or not those
2983 codes are actually RTL.
2984
2985 This routine is very general, and could (should?) be used to
2986 implement many of the other routines in this file. */
2987
2988 int
2989 for_each_rtx (rtx *x, rtx_function f, void *data)
2990 {
2991 int result;
2992 int i;
2993
2994 /* Call F on X. */
2995 result = (*f) (x, data);
2996 if (result == -1)
2997 /* Do not traverse sub-expressions. */
2998 return 0;
2999 else if (result != 0)
3000 /* Stop the traversal. */
3001 return result;
3002
3003 if (*x == NULL_RTX)
3004 /* There are no sub-expressions. */
3005 return 0;
3006
3007 i = non_rtx_starting_operands[GET_CODE (*x)];
3008 if (i < 0)
3009 return 0;
3010
3011 return for_each_rtx_1 (*x, i, f, data);
3012 }
3013
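/* A minimal usage sketch: counting the MEMs in an insn pattern.  The
   callback and COUNT below are hypothetical:

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (*x && MEM_P (*x))
         ++*(int *) data;
       return 0;		/* Keep walking.  */
     }

     int count = 0;
     for_each_rtx (&PATTERN (insn), count_mems_1, &count);

   Returning -1 from the callback would skip the sub-expressions of *X;
   any other nonzero value stops the walk and is returned.  */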
3014 \f
3015
3016 /* Data structure that holds the internal state communicated between
3017 for_each_inc_dec, for_each_inc_dec_find_mem and
3018 for_each_inc_dec_find_inc_dec. */
3019
3020 struct for_each_inc_dec_ops {
3021 /* The function to be called for each autoinc operation found. */
3022 for_each_inc_dec_fn fn;
3023 /* The opaque argument to be passed to it. */
3024 void *arg;
3025 /* The MEM we're visiting, if any. */
3026 rtx mem;
3027 };
3028
3029 static int for_each_inc_dec_find_mem (rtx *r, void *d);
3030
3031 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
3032 operands of the equivalent add insn and pass the result to the
3033 operator specified by *D. */
3034
3035 static int
3036 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
3037 {
3038 rtx x = *r;
3039 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
3040
3041 switch (GET_CODE (x))
3042 {
3043 case PRE_INC:
3044 case POST_INC:
3045 {
3046 int size = GET_MODE_SIZE (GET_MODE (data->mem));
3047 rtx r1 = XEXP (x, 0);
3048 rtx c = gen_int_mode (size, GET_MODE (r1));
3049 return data->fn (data->mem, x, r1, r1, c, data->arg);
3050 }
3051
3052 case PRE_DEC:
3053 case POST_DEC:
3054 {
3055 int size = GET_MODE_SIZE (GET_MODE (data->mem));
3056 rtx r1 = XEXP (x, 0);
3057 rtx c = gen_int_mode (-size, GET_MODE (r1));
3058 return data->fn (data->mem, x, r1, r1, c, data->arg);
3059 }
3060
3061 case PRE_MODIFY:
3062 case POST_MODIFY:
3063 {
3064 rtx r1 = XEXP (x, 0);
3065 rtx add = XEXP (x, 1);
3066 return data->fn (data->mem, x, r1, add, NULL, data->arg);
3067 }
3068
3069 case MEM:
3070 {
3071 rtx save = data->mem;
3072 int ret = for_each_inc_dec_find_mem (r, d);
3073 data->mem = save;
3074 return ret;
3075 }
3076
3077 default:
3078 return 0;
3079 }
3080 }
3081
3082 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3083 address, extract the operands of the equivalent add insn and pass
3084 the result to the operator specified by *D. */
3085
3086 static int
3087 for_each_inc_dec_find_mem (rtx *r, void *d)
3088 {
3089 rtx x = *r;
3090 if (x != NULL_RTX && MEM_P (x))
3091 {
3092 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3093 int result;
3094
3095 data->mem = x;
3096
3097 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3098 data);
3099 if (result)
3100 return result;
3101
3102 return -1;
3103 }
3104 return 0;
3105 }
3106
3107 /* Traverse *X looking for MEMs, and for autoinc operations within
3108 them. For each such autoinc operation found, call FN, passing it
3109 the innermost enclosing MEM, the operation itself, the RTX modified
3110 by the operation, two RTXs (the second may be NULL) that, once
3111 added, represent the value to be held by the modified RTX
3112 afterwards, and ARG. FN is to return -1 to skip looking for other
3113 autoinc operations within the visited operation, 0 to continue the
3114 traversal, or any other value to have it returned to the caller of
3115 for_each_inc_dec. */
3116
3117 int
3118 for_each_inc_dec (rtx *x,
3119 for_each_inc_dec_fn fn,
3120 void *arg)
3121 {
3122 struct for_each_inc_dec_ops data;
3123
3124 data.fn = fn;
3125 data.arg = arg;
3126 data.mem = NULL;
3127
3128 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3129 }
3130
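/* A usage sketch (the callback and COUNT are hypothetical).  For
   (mem:SI (post_inc:SI (reg:SI p))) the callback receives the MEM, the
   POST_INC, DEST = SRC = p and SRCOFF = (const_int 4); for PRE_MODIFY
   and POST_MODIFY, SRCOFF is NULL and SRC is the full new expression:

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *arg)
     {
       ++*(int *) arg;
       return 0;
     }

     int count = 0;
     for_each_inc_dec (&PATTERN (insn), count_autoinc, &count);  */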
3131 \f
3132 /* Searches X for any reference to REGNO, returning the rtx of the
3133 reference found if any. Otherwise, returns NULL_RTX. */
3134
3135 rtx
3136 regno_use_in (unsigned int regno, rtx x)
3137 {
3138 const char *fmt;
3139 int i, j;
3140 rtx tem;
3141
3142 if (REG_P (x) && REGNO (x) == regno)
3143 return x;
3144
3145 fmt = GET_RTX_FORMAT (GET_CODE (x));
3146 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3147 {
3148 if (fmt[i] == 'e')
3149 {
3150 if ((tem = regno_use_in (regno, XEXP (x, i))))
3151 return tem;
3152 }
3153 else if (fmt[i] == 'E')
3154 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3155 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3156 return tem;
3157 }
3158
3159 return NULL_RTX;
3160 }
3161
3162 /* Return a value indicating whether OP, an operand of a commutative
3163 operation, is preferred as the first or second operand. The higher
3164 the value, the stronger the preference for being the first operand.
3165 We use negative values to indicate a preference for the second operand
3166 and positive values for the first operand. */
3167
3168 int
3169 commutative_operand_precedence (rtx op)
3170 {
3171 enum rtx_code code = GET_CODE (op);
3172
3173 /* Constants always become the second operand. Prefer "nice" constants. */
3174 if (code == CONST_INT)
3175 return -8;
3176 if (code == CONST_WIDE_INT)
3177 return -8;
3178 if (code == CONST_DOUBLE)
3179 return -7;
3180 if (code == CONST_FIXED)
3181 return -7;
3182 op = avoid_constant_pool_reference (op);
3183 code = GET_CODE (op);
3184
3185 switch (GET_RTX_CLASS (code))
3186 {
3187 case RTX_CONST_OBJ:
3188 if (code == CONST_INT)
3189 return -6;
3190 if (code == CONST_WIDE_INT)
3191 return -6;
3192 if (code == CONST_DOUBLE)
3193 return -5;
3194 if (code == CONST_FIXED)
3195 return -5;
3196 return -4;
3197
3198 case RTX_EXTRA:
3199 /* SUBREGs of objects should come second. */
3200 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3201 return -3;
3202 return 0;
3203
3204 case RTX_OBJ:
3205 /* Complex expressions should come first, so decrease the priority
3206 of objects. Prefer pointer objects over non-pointer objects. */
3207 if ((REG_P (op) && REG_POINTER (op))
3208 || (MEM_P (op) && MEM_POINTER (op)))
3209 return -1;
3210 return -2;
3211
3212 case RTX_COMM_ARITH:
3213 /* Prefer operands that are themselves commutative to be first.
3214 This helps to make things linear. In particular,
3215 (and (and (reg) (reg)) (not (reg))) is canonical. */
3216 return 4;
3217
3218 case RTX_BIN_ARITH:
3219 /* If only one operand is a binary expression, it will be the first
3220 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3221 is canonical, although it will usually be further simplified. */
3222 return 2;
3223
3224 case RTX_UNARY:
3225 /* Then prefer NEG and NOT. */
3226 if (code == NEG || code == NOT)
3227 return 1;
3228
3229 default:
3230 return 0;
3231 }
3232 }
3233
3234 /* Return 1 iff it is necessary to swap the operands of a commutative
3235 operation in order to canonicalize the expression. */
3236
3237 bool
3238 swap_commutative_operands_p (rtx x, rtx y)
3239 {
3240 return (commutative_operand_precedence (x)
3241 < commutative_operand_precedence (y));
3242 }
3243
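/* For example, with X = (const_int 4) and Y = (reg:SI r), the
   precedences are -8 and -2 respectively, so swap_commutative_operands_p
   returns true and canonicalization places the register first:
   (plus:SI (reg:SI r) (const_int 4)).  */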
3244 /* Return 1 if X is an autoincrement side effect and the register is
3245 not the stack pointer. */
3246 int
3247 auto_inc_p (const_rtx x)
3248 {
3249 switch (GET_CODE (x))
3250 {
3251 case PRE_INC:
3252 case POST_INC:
3253 case PRE_DEC:
3254 case POST_DEC:
3255 case PRE_MODIFY:
3256 case POST_MODIFY:
3257 /* There are no REG_INC notes for SP. */
3258 if (XEXP (x, 0) != stack_pointer_rtx)
3259 return 1;
3260 default:
3261 break;
3262 }
3263 return 0;
3264 }
3265
3266 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3267 int
3268 loc_mentioned_in_p (rtx *loc, const_rtx in)
3269 {
3270 enum rtx_code code;
3271 const char *fmt;
3272 int i, j;
3273
3274 if (!in)
3275 return 0;
3276
3277 code = GET_CODE (in);
3278 fmt = GET_RTX_FORMAT (code);
3279 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3280 {
3281 if (fmt[i] == 'e')
3282 {
3283 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3284 return 1;
3285 }
3286 else if (fmt[i] == 'E')
3287 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3288 if (loc == &XVECEXP (in, i, j)
3289 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3290 return 1;
3291 }
3292 return 0;
3293 }
3294
3295 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3296 and SUBREG_BYTE, return the bit offset where the subreg begins
3297 (counting from the least significant bit of the operand). */
3298
3299 unsigned int
3300 subreg_lsb_1 (enum machine_mode outer_mode,
3301 enum machine_mode inner_mode,
3302 unsigned int subreg_byte)
3303 {
3304 unsigned int bitpos;
3305 unsigned int byte;
3306 unsigned int word;
3307
3308 /* A paradoxical subreg begins at bit position 0. */
3309 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3310 return 0;
3311
3312 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3313 /* If the subreg crosses a word boundary ensure that
3314 it also begins and ends on a word boundary. */
3315 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3316 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3317 && (subreg_byte % UNITS_PER_WORD
3318 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3319
3320 if (WORDS_BIG_ENDIAN)
3321 word = (GET_MODE_SIZE (inner_mode)
3322 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3323 else
3324 word = subreg_byte / UNITS_PER_WORD;
3325 bitpos = word * BITS_PER_WORD;
3326
3327 if (BYTES_BIG_ENDIAN)
3328 byte = (GET_MODE_SIZE (inner_mode)
3329 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3330 else
3331 byte = subreg_byte % UNITS_PER_WORD;
3332 bitpos += byte * BITS_PER_UNIT;
3333
3334 return bitpos;
3335 }
3336
3337 /* Given a subreg X, return the bit offset where the subreg begins
3338 (counting from the least significant bit of the reg). */
3339
3340 unsigned int
3341 subreg_lsb (const_rtx x)
3342 {
3343 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3344 SUBREG_BYTE (x));
3345 }
3346
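/* Worked example: for (subreg:QI (reg:SI r) 3) on a little-endian
   32-bit target with UNITS_PER_WORD == 4, word = 3 / 4 = 0 and
   byte = 3 % 4 = 3, so subreg_lsb returns 0 * 32 + 3 * 8 = 24; the
   subreg selects the most significant byte of the SImode value.  */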
3347 /* Fill in information about a subreg of a hard register.
3348 xregno - A regno of an inner hard subreg_reg (or what will become one).
3349 xmode - The mode of xregno.
3350 offset - The byte offset.
3351 ymode - The mode of a top level SUBREG (or what may become one).
3352 info - Pointer to structure to fill in. */
3353 void
3354 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3355 unsigned int offset, enum machine_mode ymode,
3356 struct subreg_info *info)
3357 {
3358 int nregs_xmode, nregs_ymode;
3359 int mode_multiple, nregs_multiple;
3360 int offset_adj, y_offset, y_offset_adj;
3361 int regsize_xmode, regsize_ymode;
3362 bool rknown;
3363
3364 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3365
3366 rknown = false;
3367
3368 /* If there are holes in a non-scalar mode in registers, we expect
3369 that it is made up of its units concatenated together. */
3370 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3371 {
3372 enum machine_mode xmode_unit;
3373
3374 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3375 if (GET_MODE_INNER (xmode) == VOIDmode)
3376 xmode_unit = xmode;
3377 else
3378 xmode_unit = GET_MODE_INNER (xmode);
3379 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3380 gcc_assert (nregs_xmode
3381 == (GET_MODE_NUNITS (xmode)
3382 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3383 gcc_assert (hard_regno_nregs[xregno][xmode]
3384 == (hard_regno_nregs[xregno][xmode_unit]
3385 * GET_MODE_NUNITS (xmode)));
3386
3387 /* You can only ask for a SUBREG of a value with holes in the middle
3388 if you don't cross the holes. (Such a SUBREG should be done by
3389 picking a different register class, or doing it in memory if
3390 necessary.) An example of a value with holes is XCmode on 32-bit
3391 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3392 3 for each part, but in memory it's two 128-bit parts.
3393 Padding is assumed to be at the end (not necessarily the 'high part')
3394 of each unit. */
3395 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3396 < GET_MODE_NUNITS (xmode))
3397 && (offset / GET_MODE_SIZE (xmode_unit)
3398 != ((offset + GET_MODE_SIZE (ymode) - 1)
3399 / GET_MODE_SIZE (xmode_unit))))
3400 {
3401 info->representable_p = false;
3402 rknown = true;
3403 }
3404 }
3405 else
3406 nregs_xmode = hard_regno_nregs[xregno][xmode];
3407
3408 nregs_ymode = hard_regno_nregs[xregno][ymode];
3409
3410 /* Paradoxical subregs are otherwise valid. */
3411 if (!rknown
3412 && offset == 0
3413 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3414 {
3415 info->representable_p = true;
3416 /* If this is a big endian paradoxical subreg, which uses more
3417 actual hard registers than the original register, we must
3418 return a negative offset so that we find the proper highpart
3419 of the register. */
3420 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3421 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3422 info->offset = nregs_xmode - nregs_ymode;
3423 else
3424 info->offset = 0;
3425 info->nregs = nregs_ymode;
3426 return;
3427 }
3428
3429 /* If registers store different numbers of bits in the different
3430 modes, we cannot generally form this subreg. */
3431 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3432 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3433 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3434 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3435 {
3436 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3437 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3438 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3439 {
3440 info->representable_p = false;
3441 info->nregs
3442 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3443 info->offset = offset / regsize_xmode;
3444 return;
3445 }
3446 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3447 {
3448 info->representable_p = false;
3449 info->nregs
3450 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3451 info->offset = offset / regsize_xmode;
3452 return;
3453 }
3454 }
3455
3456 /* Lowpart subregs are otherwise valid. */
3457 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3458 {
3459 info->representable_p = true;
3460 rknown = true;
3461
3462 if (offset == 0 || nregs_xmode == nregs_ymode)
3463 {
3464 info->offset = 0;
3465 info->nregs = nregs_ymode;
3466 return;
3467 }
3468 }
3469
3470 /* This should always pass, otherwise we don't know how to verify
3471 the constraint. These conditions may be relaxed but
3472 subreg_regno_offset would need to be redesigned. */
3473 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3474 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3475
3476 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3477 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3478 {
3479 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3480 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3481 HOST_WIDE_INT off_low = offset & (ysize - 1);
3482 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3483 offset = (xsize - ysize - off_high) | off_low;
3484 }
3485 /* The XMODE value can be seen as a vector of NREGS_XMODE
3486 values. The subreg must represent a lowpart of given field.
3487 Compute what field it is. */
3488 offset_adj = offset;
3489 offset_adj -= subreg_lowpart_offset (ymode,
3490 mode_for_size (GET_MODE_BITSIZE (xmode)
3491 / nregs_xmode,
3492 MODE_INT, 0));
3493
3494 /* Size of ymode must not be greater than the size of xmode. */
3495 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3496 gcc_assert (mode_multiple != 0);
3497
3498 y_offset = offset / GET_MODE_SIZE (ymode);
3499 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3500 nregs_multiple = nregs_xmode / nregs_ymode;
3501
3502 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3503 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3504
3505 if (!rknown)
3506 {
3507 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3508 rknown = true;
3509 }
3510 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3511 info->nregs = nregs_ymode;
3512 }
3513
3514 /* This function returns the regno offset of a subreg expression.
3515 xregno - A regno of an inner hard subreg_reg (or what will become one).
3516 xmode - The mode of xregno.
3517 offset - The byte offset.
3518 ymode - The mode of a top level SUBREG (or what may become one).
3519 RETURN - The regno offset which would be used. */
3520 unsigned int
3521 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3522 unsigned int offset, enum machine_mode ymode)
3523 {
3524 struct subreg_info info;
3525 subreg_get_info (xregno, xmode, offset, ymode, &info);
3526 return info.offset;
3527 }
3528
3529 /* This function returns true when the offset is representable via
3530 subreg_regno_offset in the given regno.
3531 xregno - A regno of an inner hard subreg_reg (or what will become one).
3532 xmode - The mode of xregno.
3533 offset - The byte offset.
3534 ymode - The mode of a top level SUBREG (or what may become one).
3535 RETURN - Whether the offset is representable. */
3536 bool
3537 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3538 unsigned int offset, enum machine_mode ymode)
3539 {
3540 struct subreg_info info;
3541 subreg_get_info (xregno, xmode, offset, ymode, &info);
3542 return info.representable_p;
3543 }
3544
3545 /* Return the number of a YMODE register to which
3546
3547 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3548
3549 can be simplified. Return -1 if the subreg can't be simplified.
3550
3551 XREGNO is a hard register number. */
3552
3553 int
3554 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3555 unsigned int offset, enum machine_mode ymode)
3556 {
3557 struct subreg_info info;
3558 unsigned int yregno;
3559
3560 #ifdef CANNOT_CHANGE_MODE_CLASS
3561 /* Give the backend a chance to disallow the mode change. */
3562 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3563 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3564 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3565 /* We can use mode change in LRA for some transformations. */
3566 && ! lra_in_progress)
3567 return -1;
3568 #endif
3569
3570 /* We shouldn't simplify stack-related registers. */
3571 if ((!reload_completed || frame_pointer_needed)
3572 && xregno == FRAME_POINTER_REGNUM)
3573 return -1;
3574
3575 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3576 && xregno == ARG_POINTER_REGNUM)
3577 return -1;
3578
3579 if (xregno == STACK_POINTER_REGNUM
3580 /* We should convert the hard stack register in LRA if it is
3581 possible. */
3582 && ! lra_in_progress)
3583 return -1;
3584
3585 /* Try to get the register offset. */
3586 subreg_get_info (xregno, xmode, offset, ymode, &info);
3587 if (!info.representable_p)
3588 return -1;
3589
3590 /* Make sure that the offsetted register value is in range. */
3591 yregno = xregno + info.offset;
3592 if (!HARD_REGISTER_NUM_P (yregno))
3593 return -1;
3594
3595 /* See whether (reg:YMODE YREGNO) is valid.
3596
3597 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3598 This is a kludge to work around how complex FP arguments are passed
3599 on IA-64 and should be fixed. See PR target/49226. */
3600 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3601 && HARD_REGNO_MODE_OK (xregno, xmode))
3602 return -1;
3603
3604 return (int) yregno;
3605 }
3606
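/* For example, on a 32-bit little-endian target where DImode occupies
   two hard registers,

     simplify_subreg_regno (xregno, DImode, 4, SImode)

   returns xregno + 1 when that register may hold SImode; a return
   value of -1 means the SUBREG has to be kept.  */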
3607 /* Return the final regno that a subreg expression refers to. */
3608 unsigned int
3609 subreg_regno (const_rtx x)
3610 {
3611 unsigned int ret;
3612 rtx subreg = SUBREG_REG (x);
3613 int regno = REGNO (subreg);
3614
3615 ret = regno + subreg_regno_offset (regno,
3616 GET_MODE (subreg),
3617 SUBREG_BYTE (x),
3618 GET_MODE (x));
3619 return ret;
3620
3621 }
3622
3623 /* Return the number of registers that a subreg expression refers
3624 to. */
3625 unsigned int
3626 subreg_nregs (const_rtx x)
3627 {
3628 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3629 }
3630
3631 /* Return the number of registers that a subreg REG with REGNO
3632 expression refers to. This is a copy of the rtlanal.c:subreg_nregs
3633 changed so that the regno can be passed in. */
3634
3635 unsigned int
3636 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3637 {
3638 struct subreg_info info;
3639 rtx subreg = SUBREG_REG (x);
3640
3641 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3642 &info);
3643 return info.nregs;
3644 }
3645
3646
3647 struct parms_set_data
3648 {
3649 int nregs;
3650 HARD_REG_SET regs;
3651 };
3652
3653 /* Helper function for noticing stores to parameter registers. */
3654 static void
3655 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3656 {
3657 struct parms_set_data *const d = (struct parms_set_data *) data;
3658 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3659 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3660 {
3661 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3662 d->nregs--;
3663 }
3664 }
3665
3666 /* Look backward for the first parameter to be loaded.
3667 Note that loads of all parameters will not necessarily be
3668 found if CSE has eliminated some of them (e.g., an argument
3669 to the outer function is passed down as a parameter).
3670 Do not skip BOUNDARY. */
3671 rtx
3672 find_first_parameter_load (rtx call_insn, rtx boundary)
3673 {
3674 struct parms_set_data parm;
3675 rtx p, before, first_set;
3676
3677 /* Since different machines initialize their parameter registers
3678 in different orders, assume nothing. Collect the set of all
3679 parameter registers. */
3680 CLEAR_HARD_REG_SET (parm.regs);
3681 parm.nregs = 0;
3682 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3683 if (GET_CODE (XEXP (p, 0)) == USE
3684 && REG_P (XEXP (XEXP (p, 0), 0)))
3685 {
3686 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3687
3688 /* We only care about registers which can hold function
3689 arguments. */
3690 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3691 continue;
3692
3693 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3694 parm.nregs++;
3695 }
3696 before = call_insn;
3697 first_set = call_insn;
3698
3699 /* Search backward for the first set of a register in this set. */
3700 while (parm.nregs && before != boundary)
3701 {
3702 before = PREV_INSN (before);
3703
3704 /* It is possible that some loads got CSEed from one call to
3705 another. Stop in that case. */
3706 if (CALL_P (before))
3707 break;
3708
3709 /* Our caller must either ensure that we will find all sets
3710 (in case code has not been optimized yet), or guard against
3711 possible labels by setting BOUNDARY to the preceding
3712 CODE_LABEL. */
3713 if (LABEL_P (before))
3714 {
3715 gcc_assert (before == boundary);
3716 break;
3717 }
3718
3719 if (INSN_P (before))
3720 {
3721 int nregs_old = parm.nregs;
3722 note_stores (PATTERN (before), parms_set, &parm);
3723 /* If we found something that did not set a parameter reg,
3724 we're done. Do not keep going, as that might result
3725 in hoisting an insn before the setting of a pseudo
3726 that is used by the hoisted insn. */
3727 if (nregs_old != parm.nregs)
3728 first_set = before;
3729 else
3730 break;
3731 }
3732 }
3733 return first_set;
3734 }
3735
3736 /* Return true if we should avoid inserting code between INSN and the
3737 preceding call instruction. */
3738
3739 bool
3740 keep_with_call_p (const_rtx insn)
3741 {
3742 rtx set;
3743
3744 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3745 {
3746 if (REG_P (SET_DEST (set))
3747 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3748 && fixed_regs[REGNO (SET_DEST (set))]
3749 && general_operand (SET_SRC (set), VOIDmode))
3750 return true;
3751 if (REG_P (SET_SRC (set))
3752 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3753 && REG_P (SET_DEST (set))
3754 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3755 return true;
3756 /* There may be a stack pop just after the call and before the store
3757 of the return register. Search for the actual store when deciding
3758 if we can break or not. */
3759 if (SET_DEST (set) == stack_pointer_rtx)
3760 {
3761 /* This CONST_CAST is okay because next_nonnote_insn just
3762 returns its argument and we assign it to a const_rtx
3763 variable. */
3764 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
3765 if (i2 && keep_with_call_p (i2))
3766 return true;
3767 }
3768 }
3769 return false;
3770 }
3771
3772 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3773 to non-complex jumps. That is, direct unconditional, conditional,
3774 and tablejumps, but not computed jumps or returns. It also does
3775 not apply to the fallthru case of a conditional jump. */
3776
3777 bool
3778 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3779 {
3780 rtx tmp = JUMP_LABEL (jump_insn);
3781
3782 if (label == tmp)
3783 return true;
3784
3785 if (tablejump_p (jump_insn, NULL, &tmp))
3786 {
3787 rtvec vec = XVEC (PATTERN (tmp),
3788 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3789 int i, veclen = GET_NUM_ELEM (vec);
3790
3791 for (i = 0; i < veclen; ++i)
3792 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3793 return true;
3794 }
3795
3796 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3797 return true;
3798
3799 return false;
3800 }
3801
3802 \f
3803 /* Return an estimate of the cost of computing rtx X.
3804 One use is in cse, to decide which expression to keep in the hash table.
3805 Another is in rtl generation, to pick the cheapest way to multiply.
3806 Other uses like the latter are expected in the future.
3807
3808 X appears as operand OPNO in an expression with code OUTER_CODE.
3809 SPEED specifies whether costs optimized for speed or size should
3810 be returned. */
3811
3812 int
3813 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3814 {
3815 int i, j;
3816 enum rtx_code code;
3817 const char *fmt;
3818 int total;
3819 int factor;
3820
3821 if (x == 0)
3822 return 0;
3823
3824 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3825 many insns, taking N times as long. */
3826 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3827 if (factor == 0)
3828 factor = 1;
3829
3830 /* Compute the default costs of certain things.
3831 Note that targetm.rtx_costs can override the defaults. */
3832
3833 code = GET_CODE (x);
3834 switch (code)
3835 {
3836 case MULT:
3837 /* Multiplication has time-complexity O(N*N), where N is the
3838 number of units (translated from digits) when using
3839 schoolbook long multiplication. */
3840 total = factor * factor * COSTS_N_INSNS (5);
3841 break;
3842 case DIV:
3843 case UDIV:
3844 case MOD:
3845 case UMOD:
3846 /* Similarly, complexity for schoolbook long division. */
3847 total = factor * factor * COSTS_N_INSNS (7);
3848 break;
3849 case USE:
3850 /* Used in combine.c as a marker. */
3851 total = 0;
3852 break;
3853 case SET:
3854 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3855 the mode for the factor. */
3856 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3857 if (factor == 0)
3858 factor = 1;
3859 /* Fall through. */
3860 default:
3861 total = factor * COSTS_N_INSNS (1);
3862 }
3863
3864 switch (code)
3865 {
3866 case REG:
3867 return 0;
3868
3869 case SUBREG:
3870 total = 0;
3871 /* If we can't tie these modes, make this expensive. The larger
3872 the mode, the more expensive it is. */
3873 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3874 return COSTS_N_INSNS (2 + factor);
3875 break;
3876
3877 default:
3878 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3879 return total;
3880 break;
3881 }
3882
3883 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3884 which is already in total. */
3885
3886 fmt = GET_RTX_FORMAT (code);
3887 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3888 if (fmt[i] == 'e')
3889 total += rtx_cost (XEXP (x, i), code, i, speed);
3890 else if (fmt[i] == 'E')
3891 for (j = 0; j < XVECLEN (x, i); j++)
3892 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3893
3894 return total;
3895 }
3896
3897 /* Fill in the structure C with information about both speed and size rtx
3898 costs for X, which is operand OPNO in an expression with code OUTER. */
3899
3900 void
3901 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3902 struct full_rtx_costs *c)
3903 {
3904 c->speed = rtx_cost (x, outer, opno, true);
3905 c->size = rtx_cost (x, outer, opno, false);
3906 }
3907
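/* A usage sketch: comparing two hypothetical candidate SET_SRC rtxen A
   and B by speed cost, breaking ties by size cost:

     struct full_rtx_costs ca, cb;
     get_full_rtx_cost (a, SET, 1, &ca);
     get_full_rtx_cost (b, SET, 1, &cb);
     bool prefer_a = (ca.speed < cb.speed
		      || (ca.speed == cb.speed && ca.size <= cb.size));  */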
3908 \f
3909 /* Return cost of address expression X.
3910 Expect that X is a properly formed address reference.
3911
3912 The SPEED parameter specifies whether costs optimized for speed or size should
3913 be returned. */
3914
3915 int
3916 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3917 {
3918 /* We may be asked for the cost of various unusual addresses, such as the
3919 operands of a push instruction. It is not worthwhile to complicate
3920 the target hook with such cases. */
3921
3922 if (!memory_address_addr_space_p (mode, x, as))
3923 return 1000;
3924
3925 return targetm.address_cost (x, mode, as, speed);
3926 }
3927
3928 /* If the target doesn't override, compute the cost as with arithmetic. */
3929
3930 int
3931 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3932 {
3933 return rtx_cost (x, MEM, 0, speed);
3934 }
3935 \f
3936
3937 unsigned HOST_WIDE_INT
3938 nonzero_bits (const_rtx x, enum machine_mode mode)
3939 {
3940 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3941 }
3942
3943 unsigned int
3944 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3945 {
3946 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3947 }
3948
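/* For example, with X = (zero_extend:SI (reg:QI r)),
   nonzero_bits (x, SImode) is 0xff, since the upper 24 bits are known
   to be zero, and num_sign_bit_copies (x, SImode) is at least 24.  */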
3949 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3950 It avoids exponential behavior in nonzero_bits1 when X has
3951 identical subexpressions on the first or the second level. */
3952
3953 static unsigned HOST_WIDE_INT
3954 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3955 enum machine_mode known_mode,
3956 unsigned HOST_WIDE_INT known_ret)
3957 {
3958 if (x == known_x && mode == known_mode)
3959 return known_ret;
3960
3961 /* Try to find identical subexpressions. If found call
3962 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3963 precomputed value for the subexpression as KNOWN_RET. */
3964
3965 if (ARITHMETIC_P (x))
3966 {
3967 rtx x0 = XEXP (x, 0);
3968 rtx x1 = XEXP (x, 1);
3969
3970 /* Check the first level. */
3971 if (x0 == x1)
3972 return nonzero_bits1 (x, mode, x0, mode,
3973 cached_nonzero_bits (x0, mode, known_x,
3974 known_mode, known_ret));
3975
3976 /* Check the second level. */
3977 if (ARITHMETIC_P (x0)
3978 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3979 return nonzero_bits1 (x, mode, x1, mode,
3980 cached_nonzero_bits (x1, mode, known_x,
3981 known_mode, known_ret));
3982
3983 if (ARITHMETIC_P (x1)
3984 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3985 return nonzero_bits1 (x, mode, x0, mode,
3986 cached_nonzero_bits (x0, mode, known_x,
3987 known_mode, known_ret));
3988 }
3989
3990 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3991 }
3992
3993 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3994 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3995 is less useful. We can't allow both, because that results in exponential
3996 run time. There is a nullstone testcase that triggered
3997 this. This macro avoids accidental uses of num_sign_bit_copies. */
3998 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3999
4000 /* Given an expression, X, compute which bits in X can be nonzero.
4001 We don't care about bits outside of those defined in MODE.
4002
4003 For most X this is simply GET_MODE_MASK (MODE), but if X is
4004 an arithmetic operation, we can do better. */
4005
4006 static unsigned HOST_WIDE_INT
4007 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4008 enum machine_mode known_mode,
4009 unsigned HOST_WIDE_INT known_ret)
4010 {
4011 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4012 unsigned HOST_WIDE_INT inner_nz;
4013 enum rtx_code code;
4014 enum machine_mode inner_mode;
4015 unsigned int mode_width = GET_MODE_PRECISION (mode);
4016
4017 /* For floating-point and vector values, assume all bits are needed. */
4018 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4019 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4020 return nonzero;
4021
4022 /* If X is wider than MODE, use its mode instead. */
4023 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4024 {
4025 mode = GET_MODE (x);
4026 nonzero = GET_MODE_MASK (mode);
4027 mode_width = GET_MODE_PRECISION (mode);
4028 }
4029
4030 if (mode_width > HOST_BITS_PER_WIDE_INT)
4031 /* Our only callers in this case look for single bit values. So
4032 just return the mode mask. Those tests will then be false. */
4033 return nonzero;
4034
4035 #ifndef WORD_REGISTER_OPERATIONS
4036 /* If MODE is wider than X, but both are a single word for both the host
4037 and target machines, we can compute this from which bits of the
4038 object might be nonzero in its own mode, taking into account the fact
4039 that on many CISC machines, accessing an object in a wider mode
4040 causes the high-order bits to become undefined. So they are
4041 not known to be zero. */
4042
4043 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4044 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4045 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4046 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4047 {
4048 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4049 known_x, known_mode, known_ret);
4050 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4051 return nonzero;
4052 }
4053 #endif
4054
4055 code = GET_CODE (x);
4056 switch (code)
4057 {
4058 case REG:
4059 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4060 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4061 all the bits above ptr_mode are known to be zero. */
4062 /* As we do not know which address space the pointer is referring to,
4063 we can do this only if the target does not support different pointer
4064 or address modes depending on the address space. */
4065 if (target_default_pointer_address_modes_p ()
4066 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4067 && REG_POINTER (x))
4068 nonzero &= GET_MODE_MASK (ptr_mode);
4069 #endif
4070
4071 /* Include declared information about alignment of pointers. */
4072 /* ??? We don't properly preserve REG_POINTER changes across
4073 pointer-to-integer casts, so we can't trust it except for
4074 things that we know must be pointers. See execute/960116-1.c. */
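/* For example, if REGNO_POINTER_ALIGN reports a 64-bit alignment for
   the stack pointer, ALIGNMENT below is 8 bytes and the three
   low-order bits are cleared from NONZERO. */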
4075 if ((x == stack_pointer_rtx
4076 || x == frame_pointer_rtx
4077 || x == arg_pointer_rtx)
4078 && REGNO_POINTER_ALIGN (REGNO (x)))
4079 {
4080 unsigned HOST_WIDE_INT alignment
4081 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4082
4083 #ifdef PUSH_ROUNDING
4084 /* If PUSH_ROUNDING is defined, it is possible for the
4085 stack to be momentarily aligned only to that amount,
4086 so we pick the least alignment. */
4087 if (x == stack_pointer_rtx && PUSH_ARGS)
4088 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4089 alignment);
4090 #endif
4091
4092 nonzero &= ~(alignment - 1);
4093 }
4094
4095 {
4096 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4097 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4098 known_mode, known_ret,
4099 &nonzero_for_hook);
4100
4101 if (new_rtx)
4102 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4103 known_mode, known_ret);
4104
4105 return nonzero_for_hook;
4106 }
4107
4108 case CONST_INT:
4109 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4110 /* If X is negative in MODE but nonnegative on the host, sign-extend it. */
4111 if (INTVAL (x) > 0
4112 && mode_width < BITS_PER_WORD
4113 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4114 != 0)
4115 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4116 #endif
4117
4118 return UINTVAL (x);
4119
4120 case MEM:
4121 #ifdef LOAD_EXTEND_OP
4122 /* In many, if not most, RISC machines, reading a byte from memory
4123 zeros the rest of the register. Noticing that fact saves a lot
4124 of extra zero-extends. */
4125 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4126 nonzero &= GET_MODE_MASK (GET_MODE (x));
4127 #endif
4128 break;
4129
4130 case EQ: case NE:
4131 case UNEQ: case LTGT:
4132 case GT: case GTU: case UNGT:
4133 case LT: case LTU: case UNLT:
4134 case GE: case GEU: case UNGE:
4135 case LE: case LEU: case UNLE:
4136 case UNORDERED: case ORDERED:
4137 /* If this produces an integer result, we know which bits are set.
4138 Code here used to clear bits outside the mode of X, but that is
4139 now done above. */
4140 /* Mind that MODE is the mode the caller wants to look at this
4141 operation in, and not the actual operation mode. We can wind
4142 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4143 that describes the results of a vector compare. */
4144 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4145 && mode_width <= HOST_BITS_PER_WIDE_INT)
4146 nonzero = STORE_FLAG_VALUE;
4147 break;
4148
4149 case NEG:
4150 #if 0
4151 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4152 and num_sign_bit_copies. */
4153 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4154 == GET_MODE_PRECISION (GET_MODE (x)))
4155 nonzero = 1;
4156 #endif
4157
4158 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4159 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4160 break;
4161
4162 case ABS:
4163 #if 0
4164 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4165 and num_sign_bit_copies. */
4166 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4167 == GET_MODE_PRECISION (GET_MODE (x)))
4168 nonzero = 1;
4169 #endif
4170 break;
4171
4172 case TRUNCATE:
4173 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4174 known_x, known_mode, known_ret)
4175 & GET_MODE_MASK (mode));
4176 break;
4177
4178 case ZERO_EXTEND:
4179 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4180 known_x, known_mode, known_ret);
4181 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4182 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4183 break;
4184
4185 case SIGN_EXTEND:
4186 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4187 Otherwise, show that all the bits in the outer mode but not in the
4188 inner mode may be nonzero. */
4189 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4190 known_x, known_mode, known_ret);
4191 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4192 {
4193 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4194 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4195 inner_nz |= (GET_MODE_MASK (mode)
4196 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4197 }
4198
4199 nonzero &= inner_nz;
4200 break;
4201
4202 case AND:
4203 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4204 known_x, known_mode, known_ret)
4205 & cached_nonzero_bits (XEXP (x, 1), mode,
4206 known_x, known_mode, known_ret);
4207 break;
4208
4209 case XOR: case IOR:
4210 case UMIN: case UMAX: case SMIN: case SMAX:
4211 {
4212 unsigned HOST_WIDE_INT nonzero0
4213 = cached_nonzero_bits (XEXP (x, 0), mode,
4214 known_x, known_mode, known_ret);
4215
4216 /* Don't call nonzero_bits for the second time if it cannot change
4217 anything. */
4218 if ((nonzero & nonzero0) != nonzero)
4219 nonzero &= nonzero0
4220 | cached_nonzero_bits (XEXP (x, 1), mode,
4221 known_x, known_mode, known_ret);
4222 }
4223 break;
4224
4225 case PLUS: case MINUS:
4226 case MULT:
4227 case DIV: case UDIV:
4228 case MOD: case UMOD:
4229 /* We can apply the rules of arithmetic to compute the number of
4230 high- and low-order zero bits of these operations. We start by
4231 computing the width (position of the highest-order nonzero bit)
4232 and the number of low-order zero bits for each value. */
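/* Worked example: if NZ0 == 0x0c and NZ1 == 0x06, then WIDTH0 == 4,
   LOW0 == 2, WIDTH1 == 3 and LOW1 == 1. For PLUS the code below
   computes RESULT_WIDTH == 5 and RESULT_LOW == 1: any such sum fits
   in five bits and has a clear low bit, so NONZERO is reduced to at
   most 0x1e. */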
4233 {
4234 unsigned HOST_WIDE_INT nz0
4235 = cached_nonzero_bits (XEXP (x, 0), mode,
4236 known_x, known_mode, known_ret);
4237 unsigned HOST_WIDE_INT nz1
4238 = cached_nonzero_bits (XEXP (x, 1), mode,
4239 known_x, known_mode, known_ret);
4240 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4241 int width0 = floor_log2 (nz0) + 1;
4242 int width1 = floor_log2 (nz1) + 1;
4243 int low0 = floor_log2 (nz0 & -nz0);
4244 int low1 = floor_log2 (nz1 & -nz1);
4245 unsigned HOST_WIDE_INT op0_maybe_minusp
4246 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4247 unsigned HOST_WIDE_INT op1_maybe_minusp
4248 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4249 unsigned int result_width = mode_width;
4250 int result_low = 0;
4251
4252 switch (code)
4253 {
4254 case PLUS:
4255 result_width = MAX (width0, width1) + 1;
4256 result_low = MIN (low0, low1);
4257 break;
4258 case MINUS:
4259 result_low = MIN (low0, low1);
4260 break;
4261 case MULT:
4262 result_width = width0 + width1;
4263 result_low = low0 + low1;
4264 break;
4265 case DIV:
4266 if (width1 == 0)
4267 break;
4268 if (!op0_maybe_minusp && !op1_maybe_minusp)
4269 result_width = width0;
4270 break;
4271 case UDIV:
4272 if (width1 == 0)
4273 break;
4274 result_width = width0;
4275 break;
4276 case MOD:
4277 if (width1 == 0)
4278 break;
4279 if (!op0_maybe_minusp && !op1_maybe_minusp)
4280 result_width = MIN (width0, width1);
4281 result_low = MIN (low0, low1);
4282 break;
4283 case UMOD:
4284 if (width1 == 0)
4285 break;
4286 result_width = MIN (width0, width1);
4287 result_low = MIN (low0, low1);
4288 break;
4289 default:
4290 gcc_unreachable ();
4291 }
4292
4293 if (result_width < mode_width)
4294 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4295
4296 if (result_low > 0)
4297 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4298 }
4299 break;
4300
4301 case ZERO_EXTRACT:
4302 if (CONST_INT_P (XEXP (x, 1))
4303 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4304 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4305 break;
4306
4307 case SUBREG:
4308 /* If this is a SUBREG formed for a promoted variable that has
4309 been zero-extended, we know that at least the high-order bits
4310 are zero, though others might be too. */
4311
4312 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4313 nonzero = GET_MODE_MASK (GET_MODE (x))
4314 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4315 known_x, known_mode, known_ret);
4316
4317 inner_mode = GET_MODE (SUBREG_REG (x));
4318 /* If the inner mode is a single word for both the host and target
4319 machines, we can compute this from which bits of the inner
4320 object might be nonzero. */
4321 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4322 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4323 {
4324 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4325 known_x, known_mode, known_ret);
4326
4327 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4328 /* If this is a typical RISC machine, we only have to worry
4329 about the way loads are extended. */
4330 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4331 ? val_signbit_known_set_p (inner_mode, nonzero)
4332 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4333 || !MEM_P (SUBREG_REG (x)))
4334 #endif
4335 {
4336 /* On many CISC machines, accessing an object in a wider mode
4337 causes the high-order bits to become undefined. So they are
4338 not known to be zero. */
4339 if (GET_MODE_PRECISION (GET_MODE (x))
4340 > GET_MODE_PRECISION (inner_mode))
4341 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4342 & ~GET_MODE_MASK (inner_mode));
4343 }
4344 }
4345 break;
4346
4347 case ASHIFTRT:
4348 case LSHIFTRT:
4349 case ASHIFT:
4350 case ROTATE:
4351 /* The nonzero bits are in two classes: any bits within MODE
4352 that aren't in GET_MODE (x) are always significant. The rest of the
4353 nonzero bits are those that are significant in the operand of
4354 the shift when shifted the appropriate number of bits. This
4355 shows that high-order bits are cleared by the right shift and
4356 low-order bits by left shifts. */
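/* For example, for (lshiftrt:SI X (const_int 24)), INNER below becomes
   OP_NONZERO >> 24, so at most the low eight bits of the result can be
   nonzero. For ASHIFTRT, the bits into which a possibly-set sign bit
   may have been copied are ORed back in. */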
4357 if (CONST_INT_P (XEXP (x, 1))
4358 && INTVAL (XEXP (x, 1)) >= 0
4359 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4360 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4361 {
4362 enum machine_mode inner_mode = GET_MODE (x);
4363 unsigned int width = GET_MODE_PRECISION (inner_mode);
4364 int count = INTVAL (XEXP (x, 1));
4365 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4366 unsigned HOST_WIDE_INT op_nonzero
4367 = cached_nonzero_bits (XEXP (x, 0), mode,
4368 known_x, known_mode, known_ret);
4369 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4370 unsigned HOST_WIDE_INT outer = 0;
4371
4372 if (mode_width > width)
4373 outer = (op_nonzero & nonzero & ~mode_mask);
4374
4375 if (code == LSHIFTRT)
4376 inner >>= count;
4377 else if (code == ASHIFTRT)
4378 {
4379 inner >>= count;
4380
4381 /* If the sign bit may have been nonzero before the shift, we
4382 need to mark all the places it could have been copied to
4383 by the shift as possibly nonzero. */
4384 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4385 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4386 << (width - count);
4387 }
4388 else if (code == ASHIFT)
4389 inner <<= count;
4390 else
4391 inner = ((inner << (count % width)
4392 | (inner >> (width - (count % width)))) & mode_mask);
4393
4394 nonzero &= (outer | inner);
4395 }
4396 break;
4397
4398 case FFS:
4399 case POPCOUNT:
4400 /* This is at most the number of bits in the mode. */
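/* For example, for SImode MODE_WIDTH is 32, so this computes
   (2 << 5) - 1 == 63, the smallest all-ones mask that covers every
   result in the range 0 ... 32. */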
4401 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4402 break;
4403
4404 case CLZ:
4405 /* If CLZ has a known value at zero, then the nonzero bits are
4406 that value, plus the number of bits in the mode minus one. */
4407 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4408 nonzero
4409 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4410 else
4411 nonzero = -1;
4412 break;
4413
4414 case CTZ:
4415 /* If CTZ has a known value at zero, then the nonzero bits are
4416 that value, plus the number of bits in the mode minus one. */
4417 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4418 nonzero
4419 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4420 else
4421 nonzero = -1;
4422 break;
4423
4424 case CLRSB:
4425 /* This is at most the number of bits in the mode minus 1. */
4426 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4427 break;
4428
4429 case PARITY:
4430 nonzero = 1;
4431 break;
4432
4433 case IF_THEN_ELSE:
4434 {
4435 unsigned HOST_WIDE_INT nonzero_true
4436 = cached_nonzero_bits (XEXP (x, 1), mode,
4437 known_x, known_mode, known_ret);
4438
4439 /* Don't call nonzero_bits for the second time if it cannot change
4440 anything. */
4441 if ((nonzero & nonzero_true) != nonzero)
4442 nonzero &= nonzero_true
4443 | cached_nonzero_bits (XEXP (x, 2), mode,
4444 known_x, known_mode, known_ret);
4445 }
4446 break;
4447
4448 default:
4449 break;
4450 }
4451
4452 return nonzero;
4453 }
4454
4455 /* See the macro definition above. */
4456 #undef cached_num_sign_bit_copies
4457
4458 \f
4459 /* The function cached_num_sign_bit_copies is a wrapper around
4460 num_sign_bit_copies1. It avoids exponential behavior in
4461 num_sign_bit_copies1 when X has identical subexpressions on the
4462 first or the second level. */
4463
4464 static unsigned int
4465 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4466 enum machine_mode known_mode,
4467 unsigned int known_ret)
4468 {
4469 if (x == known_x && mode == known_mode)
4470 return known_ret;
4471
4472 /* Try to find identical subexpressions. If found call
4473 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4474 the precomputed value for the subexpression as KNOWN_RET. */
4475
4476 if (ARITHMETIC_P (x))
4477 {
4478 rtx x0 = XEXP (x, 0);
4479 rtx x1 = XEXP (x, 1);
4480
4481 /* Check the first level. */
4482 if (x0 == x1)
4483 return
4484 num_sign_bit_copies1 (x, mode, x0, mode,
4485 cached_num_sign_bit_copies (x0, mode, known_x,
4486 known_mode,
4487 known_ret));
4488
4489 /* Check the second level. */
4490 if (ARITHMETIC_P (x0)
4491 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4492 return
4493 num_sign_bit_copies1 (x, mode, x1, mode,
4494 cached_num_sign_bit_copies (x1, mode, known_x,
4495 known_mode,
4496 known_ret));
4497
4498 if (ARITHMETIC_P (x1)
4499 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4500 return
4501 num_sign_bit_copies1 (x, mode, x0, mode,
4502 cached_num_sign_bit_copies (x0, mode, known_x,
4503 known_mode,
4504 known_ret));
4505 }
4506
4507 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4508 }
4509
4510 /* Return the number of bits at the high-order end of X that are known to
4511 be equal to the sign bit. X will be used in mode MODE; if MODE is
4512 VOIDmode, X will be used in its own mode. The returned value will always
4513 be between 1 and the number of bits in MODE. */
4514
4515 static unsigned int
4516 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4517 enum machine_mode known_mode,
4518 unsigned int known_ret)
4519 {
4520 enum rtx_code code = GET_CODE (x);
4521 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4522 int num0, num1, result;
4523 unsigned HOST_WIDE_INT nonzero;
4524
4525 /* If we weren't given a mode, use the mode of X. If the mode is still
4526 VOIDmode, we don't know anything. Likewise if one of the modes is
4527 floating-point. */
4528
4529 if (mode == VOIDmode)
4530 mode = GET_MODE (x);
4531
4532 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4533 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4534 return 1;
4535
4536 /* For a smaller object, just ignore the high bits. */
4537 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4538 {
4539 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4540 known_x, known_mode, known_ret);
4541 return MAX (1,
4542 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4543 }
4544
4545 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4546 {
4547 #ifndef WORD_REGISTER_OPERATIONS
4548 /* If this machine does not do all register operations on the entire
4549 register and MODE is wider than the mode of X, we can say nothing
4550 at all about the high-order bits. */
4551 return 1;
4552 #else
4553 /* Likewise on machines that do, if the mode of the object is smaller
4554 than a word and loads of that size don't sign extend, we can say
4555 nothing about the high order bits. */
4556 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4557 #ifdef LOAD_EXTEND_OP
4558 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4559 #endif
4560 )
4561 return 1;
4562 #endif
4563 }
4564
4565 switch (code)
4566 {
4567 case REG:
4568
4569 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4570 /* If pointers extend signed and this is a pointer in Pmode, say that
4571 all the bits above ptr_mode are known to be sign bit copies. */
4572 /* As we do not know which address space the pointer is referring to,
4573 we can do this only if the target does not support different pointer
4574 or address modes depending on the address space. */
4575 if (target_default_pointer_address_modes_p ()
4576 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4577 && mode == Pmode && REG_POINTER (x))
4578 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4579 #endif
4580
4581 {
4582 unsigned int copies_for_hook = 1, copies = 1;
4583 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4584 known_mode, known_ret,
4585 &copies_for_hook);
4586
4587 if (new_rtx)
4588 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4589 known_mode, known_ret);
4590
4591 if (copies > 1 || copies_for_hook > 1)
4592 return MAX (copies, copies_for_hook);
4593
4594 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4595 }
4596 break;
4597
4598 case MEM:
4599 #ifdef LOAD_EXTEND_OP
4600 /* Some RISC machines sign-extend all loads of smaller than a word. */
4601 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4602 return MAX (1, ((int) bitwidth
4603 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4604 #endif
4605 break;
4606
4607 case CONST_INT:
4608 /* If the constant is negative, take its 1's complement and remask.
4609 Then see how many high-order zero bits we have. */
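/* For example, for the SImode constant -4, NONZERO becomes 0xfffffffc;
   the sign bit is set, so we complement to 0x00000003, and
   32 - floor_log2 (3) - 1 == 30 sign-bit copies (bits 31 ... 2 all
   replicate the sign). */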
4610 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4611 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4612 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4613 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4614
4615 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4616
4617 case SUBREG:
4618 /* If this is a SUBREG for a promoted object that is sign-extended
4619 and we are looking at it in a wider mode, we know that at least the
4620 high-order bits are known to be sign bit copies. */
4621
4622 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4623 {
4624 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4625 known_x, known_mode, known_ret);
4626 return MAX ((int) bitwidth
4627 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4628 num0);
4629 }
4630
4631 /* For a smaller object, just ignore the high bits. */
4632 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4633 {
4634 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4635 known_x, known_mode, known_ret);
4636 return MAX (1, (num0
4637 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4638 - bitwidth)));
4639 }
4640
4641 #ifdef WORD_REGISTER_OPERATIONS
4642 #ifdef LOAD_EXTEND_OP
4643 /* For paradoxical SUBREGs on machines where all register operations
4644 affect the entire register, just look inside. Note that we are
4645 passing MODE to the recursive call, so the number of sign bit copies
4646 will remain relative to that mode, not the inner mode. */
4647
4648 /* This works only if loads sign extend. Otherwise, if we get a
4649 reload for the inner part, it may be loaded from the stack, and
4650 then we lose all sign bit copies that existed before the store
4651 to the stack. */
4652
4653 if (paradoxical_subreg_p (x)
4654 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4655 && MEM_P (SUBREG_REG (x)))
4656 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4657 known_x, known_mode, known_ret);
4658 #endif
4659 #endif
4660 break;
4661
4662 case SIGN_EXTRACT:
4663 if (CONST_INT_P (XEXP (x, 1)))
4664 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4665 break;
4666
4667 case SIGN_EXTEND:
4668 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4669 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4670 known_x, known_mode, known_ret));
4671
4672 case TRUNCATE:
4673 /* For a smaller object, just ignore the high bits. */
4674 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4675 known_x, known_mode, known_ret);
4676 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4677 - bitwidth)));
4678
4679 case NOT:
4680 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4681 known_x, known_mode, known_ret);
4682
4683 case ROTATE: case ROTATERT:
4684 /* If we are rotating left by a number of bits less than the number
4685 of sign bit copies, we can just subtract that amount from the
4686 number. */
4687 if (CONST_INT_P (XEXP (x, 1))
4688 && INTVAL (XEXP (x, 1)) >= 0
4689 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4690 {
4691 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4692 known_x, known_mode, known_ret);
4693 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4694 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4695 }
4696 break;
4697
4698 case NEG:
4699 /* In general, this subtracts one sign bit copy. But if the value
4700 is known to be positive, the number of sign bit copies is the
4701 same as that of the input. Finally, if the input has just one bit
4702 that might be nonzero, all the bits are copies of the sign bit. */
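/* (If only the low bit can be nonzero, the operand is 0 or 1, so its
   negation is 0 or -1; every bit of both values equals the sign bit,
   hence BITWIDTH is returned below.) */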
4703 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4704 known_x, known_mode, known_ret);
4705 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4706 return num0 > 1 ? num0 - 1 : 1;
4707
4708 nonzero = nonzero_bits (XEXP (x, 0), mode);
4709 if (nonzero == 1)
4710 return bitwidth;
4711
4712 if (num0 > 1
4713 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4714 num0--;
4715
4716 return num0;
4717
4718 case IOR: case AND: case XOR:
4719 case SMIN: case SMAX: case UMIN: case UMAX:
4720 /* Logical operations will preserve the number of sign-bit copies.
4721 MIN and MAX operations always return one of the operands. */
4722 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4723 known_x, known_mode, known_ret);
4724 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4725 known_x, known_mode, known_ret);
4726
4727 /* If num1 is clearing some of the top bits then regardless of
4728 the other term, we are guaranteed to have at least that many
4729 high-order zero bits. */
4730 if (code == AND
4731 && num1 > 1
4732 && bitwidth <= HOST_BITS_PER_WIDE_INT
4733 && CONST_INT_P (XEXP (x, 1))
4734 && (UINTVAL (XEXP (x, 1))
4735 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4736 return num1;
4737
4738 /* Similarly for IOR when setting high-order bits. */
4739 if (code == IOR
4740 && num1 > 1
4741 && bitwidth <= HOST_BITS_PER_WIDE_INT
4742 && CONST_INT_P (XEXP (x, 1))
4743 && (UINTVAL (XEXP (x, 1))
4744 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4745 return num1;
4746
4747 return MIN (num0, num1);
4748
4749 case PLUS: case MINUS:
4750 /* For addition and subtraction, we can have a 1-bit carry. However,
4751 if we are subtracting 1 from a positive number, there will not
4752 be such a carry. Furthermore, if the positive number is known to
4753 be 0 or 1, we know the result is either -1 or 0. */
4754
4755 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4756 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4757 {
4758 nonzero = nonzero_bits (XEXP (x, 0), mode);
4759 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4760 return (nonzero == 1 || nonzero == 0 ? bitwidth
4761 : bitwidth - floor_log2 (nonzero) - 1);
4762 }
4763
4764 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4765 known_x, known_mode, known_ret);
4766 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4767 known_x, known_mode, known_ret);
4768 result = MAX (1, MIN (num0, num1) - 1);
4769
4770 return result;
4771
4772 case MULT:
4773 /* The number of bits of the product is the sum of the number of
4774 bits of both terms. However, unless one of the terms is known
4775 to be positive, we must allow for an additional bit since negating
4776 a negative number can remove one sign bit copy. */
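/* For example, in a 32-bit mode with NUM0 == NUM1 == 20, each operand
   lies in [-2^12, 2^12 - 1] and RESULT below is 20 + 20 - 32 == 8; if
   both operands might be negative, the product can reach 2^24, which
   costs one more magnitude bit, so RESULT is reduced to 7. */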
4777
4778 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4779 known_x, known_mode, known_ret);
4780 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4781 known_x, known_mode, known_ret);
4782
4783 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4784 if (result > 0
4785 && (bitwidth > HOST_BITS_PER_WIDE_INT
4786 || (((nonzero_bits (XEXP (x, 0), mode)
4787 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4788 && ((nonzero_bits (XEXP (x, 1), mode)
4789 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4790 != 0))))
4791 result--;
4792
4793 return MAX (1, result);
4794
4795 case UDIV:
4796 /* The result must be <= the first operand. If the first operand
4797 has the high bit set, we know nothing about the number of sign
4798 bit copies. */
4799 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4800 return 1;
4801 else if ((nonzero_bits (XEXP (x, 0), mode)
4802 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4803 return 1;
4804 else
4805 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4806 known_x, known_mode, known_ret);
4807
4808 case UMOD:
4809 /* The result must be <= the second operand. If the second operand
4810 has (or just might have) the high bit set, we know nothing about
4811 the number of sign bit copies. */
4812 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4813 return 1;
4814 else if ((nonzero_bits (XEXP (x, 1), mode)
4815 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4816 return 1;
4817 else
4818 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4819 known_x, known_mode, known_ret);
4820
4821 case DIV:
4822 /* Similar to unsigned division, except that we have to worry about
4823 the case where the divisor is negative, in which case we have
4824 to add 1. */
4825 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4826 known_x, known_mode, known_ret);
4827 if (result > 1
4828 && (bitwidth > HOST_BITS_PER_WIDE_INT
4829 || (nonzero_bits (XEXP (x, 1), mode)
4830 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4831 result--;
4832
4833 return result;
4834
4835 case MOD:
4836 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4837 known_x, known_mode, known_ret);
4838 if (result > 1
4839 && (bitwidth > HOST_BITS_PER_WIDE_INT
4840 || (nonzero_bits (XEXP (x, 1), mode)
4841 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4842 result--;
4843
4844 return result;
4845
4846 case ASHIFTRT:
4847 /* An arithmetic right shift by a constant increases the number of
4848 bits that equal the sign bit. */
4849 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4850 known_x, known_mode, known_ret);
4851 if (CONST_INT_P (XEXP (x, 1))
4852 && INTVAL (XEXP (x, 1)) > 0
4853 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4854 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4855
4856 return num0;
4857
4858 case ASHIFT:
4859 /* Left shifts destroy copies. */
4860 if (!CONST_INT_P (XEXP (x, 1))
4861 || INTVAL (XEXP (x, 1)) < 0
4862 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4863 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4864 return 1;
4865
4866 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4867 known_x, known_mode, known_ret);
4868 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4869
4870 case IF_THEN_ELSE:
4871 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4872 known_x, known_mode, known_ret);
4873 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4874 known_x, known_mode, known_ret);
4875 return MIN (num0, num1);
4876
4877 case EQ: case NE: case GE: case GT: case LE: case LT:
4878 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4879 case GEU: case GTU: case LEU: case LTU:
4880 case UNORDERED: case ORDERED:
4881 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
4882 Then see how many high-order zero bits we have. */
4883 nonzero = STORE_FLAG_VALUE;
4884 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4885 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4886 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4887
4888 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4889
4890 default:
4891 break;
4892 }
4893
4894 /* If we haven't been able to figure it out by one of the above rules,
4895 see if some of the high-order bits are known to be zero. If so,
4896 count those bits and return one less than that amount. If we can't
4897 safely compute the mask for this mode, always return BITWIDTH. */
4898
4899 bitwidth = GET_MODE_PRECISION (mode);
4900 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4901 return 1;
4902
4903 nonzero = nonzero_bits (x, mode);
4904 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4905 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4906 }
4907
4908 /* Calculate the rtx_cost of a single instruction. A return value of
4909 zero indicates an instruction pattern without a known cost. */
4910
4911 int
4912 insn_rtx_cost (rtx pat, bool speed)
4913 {
4914 int i, cost;
4915 rtx set;
4916
4917 /* Extract the single set rtx from the instruction pattern.
4918 We can't use single_set since we only have the pattern. */
4919 if (GET_CODE (pat) == SET)
4920 set = pat;
4921 else if (GET_CODE (pat) == PARALLEL)
4922 {
4923 set = NULL_RTX;
4924 for (i = 0; i < XVECLEN (pat, 0); i++)
4925 {
4926 rtx x = XVECEXP (pat, 0, i);
4927 if (GET_CODE (x) == SET)
4928 {
4929 if (set)
4930 return 0;
4931 set = x;
4932 }
4933 }
4934 if (!set)
4935 return 0;
4936 }
4937 else
4938 return 0;
4939
4940 cost = set_src_cost (SET_SRC (set), speed);
4941 return cost > 0 ? cost : COSTS_N_INSNS (1);
4942 }
4943
4944 /* Given an insn INSN and condition COND, return the condition in a
4945 canonical form to simplify testing by callers. Specifically:
4946
4947 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4948 (2) Both operands will be machine operands; (cc0) will have been replaced.
4949 (3) If an operand is a constant, it will be the second operand.
4950 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4951 for GE, GEU, and LEU.
4952
4953 If the condition cannot be understood, or is an inequality floating-point
4954 comparison which needs to be reversed, 0 will be returned.
4955
4956 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4957
4958 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4959 insn used in locating the condition was found. If a replacement test
4960 of the condition is desired, it should be placed in front of that
4961 insn and we will be sure that the inputs are still valid.
4962
4963 If WANT_REG is nonzero, we wish the condition to be relative to that
4964 register, if possible. Therefore, do not canonicalize the condition
4965 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4966 to be a compare to a CC mode register.
4967
4968 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4969 and at INSN. */
4970
4971 rtx
4972 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4973 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4974 {
4975 enum rtx_code code;
4976 rtx prev = insn;
4977 const_rtx set;
4978 rtx tem;
4979 rtx op0, op1;
4980 int reverse_code = 0;
4981 enum machine_mode mode;
4982 basic_block bb = BLOCK_FOR_INSN (insn);
4983
4984 code = GET_CODE (cond);
4985 mode = GET_MODE (cond);
4986 op0 = XEXP (cond, 0);
4987 op1 = XEXP (cond, 1);
4988
4989 if (reverse)
4990 code = reversed_comparison_code (cond, insn);
4991 if (code == UNKNOWN)
4992 return 0;
4993
4994 if (earliest)
4995 *earliest = insn;
4996
4997 /* If we are comparing a register with zero, see if the register is set
4998 in the previous insn to a COMPARE or a comparison operation. Perform
4999 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5000 in cse.c */
5001
5002 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5003 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5004 && op1 == CONST0_RTX (GET_MODE (op0))
5005 && op0 != want_reg)
5006 {
5007 /* Set nonzero when we find something of interest. */
5008 rtx x = 0;
5009
5010 #ifdef HAVE_cc0
5011 /* If comparison with cc0, import actual comparison from compare
5012 insn. */
5013 if (op0 == cc0_rtx)
5014 {
5015 if ((prev = prev_nonnote_insn (prev)) == 0
5016 || !NONJUMP_INSN_P (prev)
5017 || (set = single_set (prev)) == 0
5018 || SET_DEST (set) != cc0_rtx)
5019 return 0;
5020
5021 op0 = SET_SRC (set);
5022 op1 = CONST0_RTX (GET_MODE (op0));
5023 if (earliest)
5024 *earliest = prev;
5025 }
5026 #endif
5027
5028 /* If this is a COMPARE, pick up the two things being compared. */
5029 if (GET_CODE (op0) == COMPARE)
5030 {
5031 op1 = XEXP (op0, 1);
5032 op0 = XEXP (op0, 0);
5033 continue;
5034 }
5035 else if (!REG_P (op0))
5036 break;
5037
5038 /* Go back to the previous insn. Stop if it is not an INSN. We also
5039 stop if it isn't a single set or if it has a REG_INC note because
5040 we don't want to bother dealing with it. */
5041
5042 prev = prev_nonnote_nondebug_insn (prev);
5043
5044 if (prev == 0
5045 || !NONJUMP_INSN_P (prev)
5046 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5047 /* In cfglayout mode, there do not have to be labels at the
5048 beginning of a block, or jumps at the end, so the previous
5049 conditions would not stop us when we reach the bb boundary. */
5050 || BLOCK_FOR_INSN (prev) != bb)
5051 break;
5052
5053 set = set_of (op0, prev);
5054
5055 if (set
5056 && (GET_CODE (set) != SET
5057 || !rtx_equal_p (SET_DEST (set), op0)))
5058 break;
5059
5060 /* If this is setting OP0, get what it sets it to if it looks
5061 relevant. */
5062 if (set)
5063 {
5064 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
5065 #ifdef FLOAT_STORE_FLAG_VALUE
5066 REAL_VALUE_TYPE fsfv;
5067 #endif
5068
5069 /* ??? We may not combine comparisons done in a CCmode with
5070 comparisons not done in a CCmode. This is to aid targets
5071 like Alpha that have an IEEE compliant EQ instruction, and
5072 a non-IEEE compliant BEQ instruction. The use of CCmode is
5073 actually artificial, simply to prevent the combination, but
5074 should not affect other platforms.
5075
5076 However, we must allow VOIDmode comparisons to match either
5077 CCmode or non-CCmode comparison, because some ports have
5078 modeless comparisons inside branch patterns.
5079
5080 ??? This mode check should perhaps look more like the mode check
5081 in simplify_comparison in combine. */
5082 if (((GET_MODE_CLASS (mode) == MODE_CC)
5083 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5084 && mode != VOIDmode
5085 && inner_mode != VOIDmode)
5086 break;
5087 if (GET_CODE (SET_SRC (set)) == COMPARE
5088 || (((code == NE
5089 || (code == LT
5090 && val_signbit_known_set_p (inner_mode,
5091 STORE_FLAG_VALUE))
5092 #ifdef FLOAT_STORE_FLAG_VALUE
5093 || (code == LT
5094 && SCALAR_FLOAT_MODE_P (inner_mode)
5095 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5096 REAL_VALUE_NEGATIVE (fsfv)))
5097 #endif
5098 ))
5099 && COMPARISON_P (SET_SRC (set))))
5100 x = SET_SRC (set);
5101 else if (((code == EQ
5102 || (code == GE
5103 && val_signbit_known_set_p (inner_mode,
5104 STORE_FLAG_VALUE))
5105 #ifdef FLOAT_STORE_FLAG_VALUE
5106 || (code == GE
5107 && SCALAR_FLOAT_MODE_P (inner_mode)
5108 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5109 REAL_VALUE_NEGATIVE (fsfv)))
5110 #endif
5111 ))
5112 && COMPARISON_P (SET_SRC (set)))
5113 {
5114 reverse_code = 1;
5115 x = SET_SRC (set);
5116 }
5117 else if ((code == EQ || code == NE)
5118 && GET_CODE (SET_SRC (set)) == XOR)
5119 /* Handle sequences like:
5120
5121 (set op0 (xor X Y))
5122 ...(eq|ne op0 (const_int 0))...
5123
5124 in which case:
5125
5126 (eq op0 (const_int 0)) reduces to (eq X Y)
5127 (ne op0 (const_int 0)) reduces to (ne X Y)
5128
5129 This is the form used by MIPS16, for example. */
5130 x = SET_SRC (set);
5131 else
5132 break;
5133 }
5134
5135 else if (reg_set_p (op0, prev))
5136 /* If this sets OP0, but not directly, we have to give up. */
5137 break;
5138
5139 if (x)
5140 {
5141 /* If the caller is expecting the condition to be valid at INSN,
5142 make sure X doesn't change before INSN. */
5143 if (valid_at_insn_p)
5144 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5145 break;
5146 if (COMPARISON_P (x))
5147 code = GET_CODE (x);
5148 if (reverse_code)
5149 {
5150 code = reversed_comparison_code (x, prev);
5151 if (code == UNKNOWN)
5152 return 0;
5153 reverse_code = 0;
5154 }
5155
5156 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5157 if (earliest)
5158 *earliest = prev;
5159 }
5160 }
5161
5162 /* If constant is first, put it last. */
5163 if (CONSTANT_P (op0))
5164 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5165
5166 /* If OP0 is the result of a comparison, we weren't able to find what
5167 was really being compared, so fail. */
5168 if (!allow_cc_mode
5169 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5170 return 0;
5171
5172 /* Canonicalize any ordered comparison with integers involving equality
5173 if we can do computations in the relevant mode and we do not
5174 overflow. */
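/* For example, with QImode operands, (le x 4) becomes (lt x 5),
   (leu x 254) becomes (ltu x 255) and (geu x 1) becomes (gtu x 0),
   while (le x 127) is left alone because adding 1 would overflow. */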
5175
5176 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5177 && CONST_INT_P (op1)
5178 && GET_MODE (op0) != VOIDmode
5179 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5180 {
5181 HOST_WIDE_INT const_val = INTVAL (op1);
5182 unsigned HOST_WIDE_INT uconst_val = const_val;
5183 unsigned HOST_WIDE_INT max_val
5184 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5185
5186 switch (code)
5187 {
5188 case LE:
5189 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5190 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5191 break;
5192
5193 /* When cross-compiling, const_val might be sign-extended from
5194 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5195 case GE:
5196 if ((const_val & max_val)
5197 != ((unsigned HOST_WIDE_INT) 1
5198 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5199 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5200 break;
5201
5202 case LEU:
5203 if (uconst_val < max_val)
5204 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5205 break;
5206
5207 case GEU:
5208 if (uconst_val != 0)
5209 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5210 break;
5211
5212 default:
5213 break;
5214 }
5215 }
5216
5217 /* Never return CC0; return zero instead. */
5218 if (CC0_P (op0))
5219 return 0;
5220
5221 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5222 }
5223
5224 /* Given a jump insn JUMP, return the condition that will cause it to branch
5225 to its JUMP_LABEL. If the condition cannot be understood, or is an
5226 inequality floating-point comparison which needs to be reversed, 0 will
5227 be returned.
5228
5229 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5230 insn used in locating the condition was found. If a replacement test
5231 of the condition is desired, it should be placed in front of that
5232 insn and we will be sure that the inputs are still valid. If EARLIEST
5233 is null, the returned condition will be valid at INSN.
5234
5235 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5236 compare CC mode register.
5237
5238 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5239
5240 rtx
5241 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5242 {
5243 rtx cond;
5244 int reverse;
5245 rtx set;
5246
5247 /* If this is not a standard conditional jump, we can't parse it. */
5248 if (!JUMP_P (jump)
5249 || ! any_condjump_p (jump))
5250 return 0;
5251 set = pc_set (jump);
5252
5253 cond = XEXP (SET_SRC (set), 0);
5254
5255 /* If this branches to JUMP_LABEL when the condition is false, reverse
5256 the condition. */
5257 reverse
5258 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5259 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5260
5261 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5262 allow_cc_mode, valid_at_insn_p);
5263 }
5264
5265 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5266 TARGET_MODE_REP_EXTENDED.
5267
5268 Note that we assume that the property of
5269 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5270 narrower than mode B. I.e., if A is a mode narrower than B then in
5271 order to be able to operate on it in mode B, mode A needs to
5272 satisfy the requirements set by the representation of mode B. */
5273
5274 static void
5275 init_num_sign_bit_copies_in_rep (void)
5276 {
5277 enum machine_mode mode, in_mode;
5278
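/* Note that the outer loop's increment reads MODE rather than IN_MODE;
   this is correct, if subtle, because the inner loop always terminates
   with MODE == IN_MODE. */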
5279 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5280 in_mode = GET_MODE_WIDER_MODE (mode))
5281 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5282 mode = GET_MODE_WIDER_MODE (mode))
5283 {
5284 enum machine_mode i;
5285
5286 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5287 extends to the next wider mode. */
5288 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5289 || GET_MODE_WIDER_MODE (mode) == in_mode);
5290
5291 /* We are in in_mode. Count how many bits outside of mode
5292 have to be copies of the sign-bit. */
5293 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5294 {
5295 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5296
5297 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5298 /* We can only check sign-bit copies starting from the
5299 top-bit. In order to be able to check the bits we
5300 have already seen we pretend that subsequent bits
5301 have to be sign-bit copies too. */
5302 || num_sign_bit_copies_in_rep [in_mode][mode])
5303 num_sign_bit_copies_in_rep [in_mode][mode]
5304 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5305 }
5306 }
5307 }
5308
5309 /* Suppose that truncation from the machine mode of X to MODE is not a
5310 no-op. See if there is anything special about X so that we can
5311 assume it already contains a truncated value of MODE. */
5312
5313 bool
5314 truncated_to_mode (enum machine_mode mode, const_rtx x)
5315 {
5316 /* This register has already been used in MODE without explicit
5317 truncation. */
5318 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5319 return true;
5320
5321 /* See if we already satisfy the requirements of MODE. If yes we
5322 can just switch to MODE. */
5323 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5324 && (num_sign_bit_copies (x, GET_MODE (x))
5325 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5326 return true;
5327
5328 return false;
5329 }
5330 \f
5331 /* Initialize non_rtx_starting_operands, which is used to speed up
5332 for_each_rtx. */
5333 void
5334 init_rtlanal (void)
5335 {
5336 int i;
5337 for (i = 0; i < NUM_RTX_CODE; i++)
5338 {
5339 const char *format = GET_RTX_FORMAT (i);
5340 const char *first = strpbrk (format, "eEV");
5341 non_rtx_starting_operands[i] = first ? first - format : -1;
5342 }
5343
5344 init_num_sign_bit_copies_in_rep ();
5345 }
5346 \f
5347 /* Check whether this is a constant pool constant. */
5348 bool
5349 constant_pool_constant_p (rtx x)
5350 {
5351 x = avoid_constant_pool_reference (x);
5352 return CONST_DOUBLE_P (x);
5353 }
5354 \f
5355 /* If M is a bitmask that selects a field of low-order bits within an item but
5356 not the entire word, return the length of the field. Return -1 otherwise.
5357 M is used in machine mode MODE. */
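/* For example, in SImode, 0x0000ffff yields 16 (exact_log2 of 0x10000)
   while 0x0000ff00 yields -1, since that field does not start at bit 0. */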
5358
5359 int
5360 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5361 {
5362 if (mode != VOIDmode)
5363 {
5364 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5365 return -1;
5366 m &= GET_MODE_MASK (mode);
5367 }
5368
5369 return exact_log2 (m + 1);
5370 }
5371
5372 /* Return the mode of MEM's address. */
5373
5374 enum machine_mode
5375 get_address_mode (rtx mem)
5376 {
5377 enum machine_mode mode;
5378
5379 gcc_assert (MEM_P (mem));
5380 mode = GET_MODE (XEXP (mem, 0));
5381 if (mode != VOIDmode)
5382 return mode;
5383 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5384 }
5385 \f
5386 /* Split up a CONST_DOUBLE or integer constant rtx
5387 into two rtx's for single words,
5388 storing in *FIRST the word that comes first in memory in the target
5389 and in *SECOND the other.
5390
5391 TODO: This function needs to be rewritten to work on any size
5392 integer. */
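/* For example, on a 64-bit host targeting 32-bit words, the CONST_INT
   0x123456789abcdef0 splits into a low word of 0x9abcdef0 (sign-extended
   on the host, since its bit 31 is set) and a high word of 0x12345678. */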
5393
5394 void
5395 split_double (rtx value, rtx *first, rtx *second)
5396 {
5397 if (CONST_INT_P (value))
5398 {
5399 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5400 {
5401 /* In this case the CONST_INT holds both target words.
5402 Extract the bits from it into two word-sized pieces.
5403 Sign extend each half to HOST_WIDE_INT. */
5404 unsigned HOST_WIDE_INT low, high;
5405 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5406 unsigned bits_per_word = BITS_PER_WORD;
5407
5408 /* Set sign_bit to the most significant bit of a word. */
5409 sign_bit = 1;
5410 sign_bit <<= bits_per_word - 1;
5411
5412 /* Set mask so that all bits of the word are set. We could
5413 have used 1 << BITS_PER_WORD instead of basing the
5414 calculation on sign_bit. However, on machines where
5415 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5416 compiler warning, even though the code would never be
5417 executed. */
5418 mask = sign_bit << 1;
5419 mask--;
5420
5421 /* Set sign_extend as any remaining bits. */
5422 sign_extend = ~mask;
5423
5424 /* Pick the lower word and sign-extend it. */
5425 low = INTVAL (value);
5426 low &= mask;
5427 if (low & sign_bit)
5428 low |= sign_extend;
5429
5430 /* Pick the higher word, shifted to the least significant
5431 bits, and sign-extend it. */
5432 high = INTVAL (value);
5433 high >>= bits_per_word - 1;
5434 high >>= 1;
5435 high &= mask;
5436 if (high & sign_bit)
5437 high |= sign_extend;
5438
5439 /* Store the words in the target machine order. */
5440 if (WORDS_BIG_ENDIAN)
5441 {
5442 *first = GEN_INT (high);
5443 *second = GEN_INT (low);
5444 }
5445 else
5446 {
5447 *first = GEN_INT (low);
5448 *second = GEN_INT (high);
5449 }
5450 }
5451 else
5452 {
5453 /* The rule for using CONST_INT for a wider mode
5454 is that we regard the value as signed.
5455 So sign-extend it. */
5456 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5457 if (WORDS_BIG_ENDIAN)
5458 {
5459 *first = high;
5460 *second = value;
5461 }
5462 else
5463 {
5464 *first = value;
5465 *second = high;
5466 }
5467 }
5468 }
5469 else if (GET_CODE (value) == CONST_WIDE_INT)
5470 {
5471 /* All of this is scary code and needs to be converted to
5472 properly work with any size integer. */
5473 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5474 if (WORDS_BIG_ENDIAN)
5475 {
5476 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5477 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5478 }
5479 else
5480 {
5481 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5482 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5483 }
5484 }
5485 else if (!CONST_DOUBLE_P (value))
5486 {
5487 if (WORDS_BIG_ENDIAN)
5488 {
5489 *first = const0_rtx;
5490 *second = value;
5491 }
5492 else
5493 {
5494 *first = value;
5495 *second = const0_rtx;
5496 }
5497 }
5498 else if (GET_MODE (value) == VOIDmode
5499 /* This is the old way we did CONST_DOUBLE integers. */
5500 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5501 {
5502 /* In an integer, the words are defined as most and least significant.
5503 So order them by the target's convention. */
5504 if (WORDS_BIG_ENDIAN)
5505 {
5506 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5507 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5508 }
5509 else
5510 {
5511 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5512 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5513 }
5514 }
5515 else
5516 {
5517 REAL_VALUE_TYPE r;
5518 long l[2];
5519 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5520
5521 /* Note, this converts the REAL_VALUE_TYPE to the target's
5522 format, splits up the floating point double and outputs
5523 exactly 32 bits of it into each of l[0] and l[1] --
5524 not necessarily BITS_PER_WORD bits. */
5525 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5526
5527 /* If 32 bits is an entire word for the target, but not for the host,
5528 then sign-extend on the host so that the number will look the same
5529 way on the host that it would on the target. See for instance
5530 simplify_unary_operation. The #if is needed to avoid compiler
5531 warnings. */
5532
5533 #if HOST_BITS_PER_LONG > 32
5534 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5535 {
5536 if (l[0] & ((long) 1 << 31))
5537 l[0] |= ((long) (-1) << 32);
5538 if (l[1] & ((long) 1 << 31))
5539 l[1] |= ((long) (-1) << 32);
5540 }
5541 #endif
5542
5543 *first = GEN_INT (l[0]);
5544 *second = GEN_INT (l[1]);
5545 }
5546 }
5547
5548 /* Return true if X is a sign_extract or zero_extract from the least
5549 significant bit. */
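/* For example, (zero_extract:SI (reg:SI R) (const_int 8) (const_int 0))
   -- with an illustrative register R -- qualifies on a target where
   BITS_BIG_ENDIAN is 0, since the field starts at bit 0. */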
5550
5551 static bool
5552 lsb_bitfield_op_p (rtx x)
5553 {
5554 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5555 {
5556 enum machine_mode mode = GET_MODE (XEXP (x, 0));
5557 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5558 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5559
5560 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5561 }
5562 return false;
5563 }
5564
5565 /* Strip outer address "mutations" from LOC and return a pointer to the
5566 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5567 stripped expression there.
5568
5569 "Mutations" either convert between modes or apply some kind of
5570 extension, truncation or alignment. */
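/* For example, given (and:DI (plus:DI R1 R2) (const_int -4)) -- an
   address aligned to 4 bytes, with illustrative registers R1 and R2 --
   this strips the AND and returns a pointer to the inner PLUS;
   *OUTER_CODE, if requested, is set to AND. */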
5571
5572 rtx *
5573 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5574 {
5575 for (;;)
5576 {
5577 enum rtx_code code = GET_CODE (*loc);
5578 if (GET_RTX_CLASS (code) == RTX_UNARY)
5579 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5580 used to convert between pointer sizes. */
5581 loc = &XEXP (*loc, 0);
5582 else if (lsb_bitfield_op_p (*loc))
5583 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5584 acts as a combined truncation and extension. */
5585 loc = &XEXP (*loc, 0);
5586 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5587 /* (and ... (const_int -X)) is used to align to X bytes. */
5588 loc = &XEXP (*loc, 0);
5589 else if (code == SUBREG
5590 && !OBJECT_P (SUBREG_REG (*loc))
5591 && subreg_lowpart_p (*loc))
5592 /* (subreg (operator ...) ...) inside an address is used for mode
5593 conversion too. */
5594 loc = &SUBREG_REG (*loc);
5595 else
5596 return loc;
5597 if (outer_code)
5598 *outer_code = code;
5599 }
5600 }
5601
5602 /* Return true if CODE applies some kind of scale. The scaled value
5603 is the first operand and the scale is the second. */
5604
5605 static bool
5606 binary_scale_code_p (enum rtx_code code)
5607 {
5608 return (code == MULT
5609 || code == ASHIFT
5610 /* Needed by ARM targets. */
5611 || code == ASHIFTRT
5612 || code == LSHIFTRT
5613 || code == ROTATE
5614 || code == ROTATERT);
5615 }
5616
5617 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5618 (see address_info). Return null otherwise. */
5619
5620 static rtx *
5621 get_base_term (rtx *inner)
5622 {
5623 if (GET_CODE (*inner) == LO_SUM)
5624 inner = strip_address_mutations (&XEXP (*inner, 0));
5625 if (REG_P (*inner)
5626 || MEM_P (*inner)
5627 || GET_CODE (*inner) == SUBREG)
5628 return inner;
5629 return 0;
5630 }
5631
5632 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5633 (see address_info). Return null otherwise. */
5634
5635 static rtx *
5636 get_index_term (rtx *inner)
5637 {
5638 /* At present, only constant scales are allowed. */
5639 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5640 inner = strip_address_mutations (&XEXP (*inner, 0));
5641 if (REG_P (*inner)
5642 || MEM_P (*inner)
5643 || GET_CODE (*inner) == SUBREG)
5644 return inner;
5645 return 0;
5646 }
5647
5648 /* Set the segment part of address INFO to LOC, given that INNER is the
5649 unmutated value. */
5650
5651 static void
5652 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5653 {
5654 gcc_assert (!info->segment);
5655 info->segment = loc;
5656 info->segment_term = inner;
5657 }
5658
5659 /* Set the base part of address INFO to LOC, given that INNER is the
5660 unmutated value. */
5661
5662 static void
5663 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5664 {
5665 gcc_assert (!info->base);
5666 info->base = loc;
5667 info->base_term = inner;
5668 }
5669
5670 /* Set the index part of address INFO to LOC, given that INNER is the
5671 unmutated value. */
5672
5673 static void
5674 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5675 {
5676 gcc_assert (!info->index);
5677 info->index = loc;
5678 info->index_term = inner;
5679 }
5680
5681 /* Set the displacement part of address INFO to LOC, given that INNER
5682 is the constant term. */
5683
5684 static void
5685 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5686 {
5687 gcc_assert (!info->disp);
5688 info->disp = loc;
5689 info->disp_term = inner;
5690 }
5691
5692 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5693 rest of INFO accordingly. */
5694
5695 static void
5696 decompose_incdec_address (struct address_info *info)
5697 {
5698 info->autoinc_p = true;
5699
5700 rtx *base = &XEXP (*info->inner, 0);
5701 set_address_base (info, base, base);
5702 gcc_checking_assert (info->base == info->base_term);
5703
5704 /* These addresses are only valid when the size of the addressed
5705 value is known. */
5706 gcc_checking_assert (info->mode != VOIDmode);
5707 }
5708
5709 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5710 of INFO accordingly. */
5711
5712 static void
5713 decompose_automod_address (struct address_info *info)
5714 {
5715 info->autoinc_p = true;
5716
5717 rtx *base = &XEXP (*info->inner, 0);
5718 set_address_base (info, base, base);
5719 gcc_checking_assert (info->base == info->base_term);
5720
5721 rtx plus = XEXP (*info->inner, 1);
5722 gcc_assert (GET_CODE (plus) == PLUS);
5723
5724 info->base_term2 = &XEXP (plus, 0);
5725 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5726
5727 rtx *step = &XEXP (plus, 1);
5728 rtx *inner_step = strip_address_mutations (step);
5729 if (CONSTANT_P (*inner_step))
5730 set_address_disp (info, step, inner_step);
5731 else
5732 set_address_index (info, step, inner_step);
5733 }
5734
5735 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5736 values in [PTR, END). Return a pointer to the end of the used array. */
5737
5738 static rtx **
5739 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5740 {
5741 rtx x = *loc;
5742 if (GET_CODE (x) == PLUS)
5743 {
5744 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5745 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5746 }
5747 else
5748 {
5749 gcc_assert (ptr != end);
5750 *ptr++ = loc;
5751 }
5752 return ptr;
5753 }

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, enum machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
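
/* For illustration: in (plus (reg A) (reg B)) where only A has
   REG_POINTER set, baseness returns 2 for A and (for a pseudo) 0
   for B, so A is preferred as the base and B as the index.  */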

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
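
/* For illustration only (not compiled): for a scaled-index address
   such as

       (plus (plus (mult (reg I) (const_int 4)) (reg B)) (const_int 8))

   the (const_int 8) is claimed as the displacement, the (mult ...)
   can only be an index (it has no base interpretation), and (reg B)
   is then the sole remaining operand, so the OUT == 1 case makes it
   the base.  */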

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  LOC is a bare address rather
   than a MEM, so the mode of the addressed value is not known (VOIDmode)
   and the generic address space is assumed.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
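
/* A minimal usage sketch, kept out of the build with #if 0.  The
   function name examine_mem_address is invented for this example;
   it simply dumps whichever parts of the address were found.  */
#if 0
static void
examine_mem_address (FILE *file, rtx mem)
{
  struct address_info info;
  decompose_mem_address (&info, mem);
  if (info.base)
    {
      fprintf (file, "base: ");
      print_rtl_single (file, *info.base);
    }
  if (info.index)
    {
      fprintf (file, "index (scale " HOST_WIDE_INT_PRINT_DEC "): ",
	       get_index_scale (&info));
      print_rtl_single (file, *info.index);
    }
  if (info.disp)
    {
      fprintf (file, "disp: ");
      print_rtl_single (file, *info.disp);
    }
}
#endif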

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
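
/* For illustration only (not compiled): a caller that has decomposed
   an address can edit it through the recorded pointers and then
   refresh the decomposition, e.g.

       *info.disp = GEN_INT (new_offset);
       update_address (&info);

   where new_offset is a hypothetical variable.  */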

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
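
/* For illustration: get_index_scale returns 4 for an index of the form
   (mult (reg I) (const_int 4)) or (ashift (reg I) (const_int 2)),
   1 for a bare (reg I), and 0 for anything it does not recognize.  */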

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
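
/* For illustration: with an index of the form (mult (reg I) (const_int 4))
   the index code is MULT; with no index but a (const_int 8) displacement
   it is CONST_INT; with neither, SCRATCH acts as a "no index" marker.  */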