/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).
   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.
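   For example (an illustrative sketch, not taken from any particular
   target), given a link from B back to A:

	A: (set (reg 100) (plus (reg 99) (const_int 4)))
	B: (set (reg 101) (mult (reg 100) (reg 98)))

   substituting A's source into B yields the tentative pattern

	B': (set (reg 101) (mult (plus (reg 99) (const_int 4)) (reg 98)))

   which is installed, and A deleted, only if B' matches an insn pattern
   in the target's machine description.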
   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.
   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.
   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking
   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
80 #include "coretypes.h"
86 #include "double-int.h"
93 #include "stor-layout.h"
97 #include "hard-reg-set.h"
100 #include "dominance.h"
103 #include "cfgcleanup.h"
104 #include "basic-block.h"
105 #include "insn-config.h"
106 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
108 #include "statistics.h"
110 #include "fixed-value.h"
115 #include "emit-rtl.h"
119 #include "insn-attr.h"
121 #include "diagnostic-core.h"
123 #include "insn-codes.h"
125 #include "rtlhooks-def.h"
127 #include "tree-pass.h"
129 #include "valtrack.h"
130 #include "hash-map.h"
132 #include "plugin-api.h"
136 #include "rtl-iter.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
typedef struct reg_stat_struct {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;
  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.
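     (Illustrative example: if reg_stat[n].nonzero_bits is 0xff, a later
     (and (reg n) (const_int 255)) cannot clear any additional bits and is
     therefore redundant.)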
     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				circumstances.

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date.)

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */
  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int last_set_table_tick;
  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;
  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */
  char last_set_invalid;
  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;
  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */
  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */
  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
} reg_stat_type;

static vec<reg_stat_type> reg_stat;
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
#define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
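/* Illustrative use (a sketch; `process' is a hypothetical callback):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, insn)
       process (link->insn, link->regno);

   visits every insn recorded as feeding INSN.  */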
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};
/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
					 machine_mode,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode,
						const_rtx, machine_mode,
						unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, machine_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx,
			      rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = safe_as_a <rtx_insn *> (split_insns (pattern, insn));
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;
  switch (code)
    {
    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }
  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		/* Duplicate usage.  */
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */
static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
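/* Illustrative use (a sketch): replacing the source of a SET while keeping
   the change revertible:

     SUBST (SET_SRC (pat), newsrc);

   records the old SET_SRC in undobuf.undos before storing NEWSRC, so a
   later undo_all () restores the original pattern.  */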
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */
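/* Worked example (illustrative): if I2 and I3 cost 4 and 8, old_cost is 12;
   a replacement NEWPAT of cost 10 is allowed, whereas one of cost 16 would
   make new_cost > old_cost and be rejected below.  */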
static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
/* Delete any insns that copy a register to itself.  */
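/* (Illustrative: a noop move is e.g. (set (reg 65) (reg 65)); such insns
   can be left behind by substitutions and are simply removed here.)  */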
static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
      || (regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
#endif
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
#endif
      )
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use them while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
	      if (REG_NOTE_KIND (links) == REG_INC)
		set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						  insn);

	    /* Record the current insn_rtx_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
						optimize_this_for_speed_p);
	    if (dump_file)
	      fprintf (dump_file, "insn_cost %d: %d\n",
		       INSN_UID (insn), INSN_COST (insn));
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && last_combined_insn->deleted ())
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BARRIER_P (last_combined_insn)
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    if (NONJUMP_INSN_P (links->insn)
		&& GET_CODE (PATTERN (links->insn)) == SET
		&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		&& (prev = prev_nonnote_insn (links->insn)) != 0
		&& NONJUMP_INSN_P (prev)
		&& sets_cc0_p (PATTERN (prev))
		&& (next = try_combine (insn, links->insn,
					prev, NULL, &new_direct_jump_p,
					last_combined_insn)) != 0)
	      goto retry;

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig = SET_SRC (set);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  if (GET_MODE_PRECISION (mode) < prec
      && CONST_INT_P (src)
      && INTVAL (src) > 0
      && val_signbit_known_set_p (mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));

  return src;
}
#endif
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
  src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
  if (reg_equal)
    reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
#endif

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	  rsp->sign_bit_copies = 1;
	}
    }
}
/* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
   optionally insns that were previously combined into I3 or that will be
   combined into the merger of INSN and I3.  The order is PRED, PRED2,
   INSN, SUCC, SUCC2, I3.

   Return 0 if the combination is not allowed for any reason.

   If the combination is allowed, *PDEST will be set to the single
   destination of INSN and *PSRC to the single source, and this function
   returns 1.  */

static int
can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ,
	       rtx_insn *succ2, rtx *pdest, rtx *psrc)
{
  int i;
  const_rtx set = 0;
  rtx src, dest;
  bool all_adjacent = true;
  int (*is_volatile_p) (const_rtx);

  if (succ)
    {
      if (succ2)
	{
	  if (next_active_insn (succ2) != i3)
	    all_adjacent = false;
	  if (next_active_insn (succ) != succ2)
	    all_adjacent = false;
	}
      else if (next_active_insn (succ) != i3)
	all_adjacent = false;
      if (next_active_insn (insn) != succ)
	all_adjacent = false;
    }
  else if (next_active_insn (insn) != i3)
    all_adjacent = false;
  /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
     or a PARALLEL consisting of such a SET and CLOBBERs.

     If INSN has CLOBBER parallel parts, ignore them for our processing.
     By definition, these happen during the execution of the insn.  When it
     is merged with another insn, all bets are off.  If they are, in fact,
     needed and aren't also supplied in I3, they may be added by
     recog_for_combine.  Otherwise, it won't match.

     We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	{
	  rtx elt = XVECEXP (PATTERN (insn), 0, i);

	  switch (GET_CODE (elt))
	    {
	    /* This is important to combine floating point insns
	       for the SH4 port.  */
	    case USE:
	      /* Combining an isolated USE doesn't make sense.
		 We depend here on combinable_i3pat to reject them.  */
	      /* The code below this loop only verifies that the inputs of
		 the SET in INSN do not change.  We call reg_set_between_p
		 to verify that the REG in the USE does not change between
		 I3 and INSN.
		 If the USE in INSN was for a pseudo register, the matching
		 insn pattern will likely match any register; combining this
		 with any other USE would only be safe if we knew that the
		 used registers have identical values, or if there was
		 something to tell them apart, e.g. different modes.  For
		 now, we forgo such complicated tests and simply disallow
		 combining of USES of pseudo registers with any other USE.  */
	      if (REG_P (XEXP (elt, 0))
		  && GET_CODE (PATTERN (i3)) == PARALLEL)
		{
		  rtx i3pat = PATTERN (i3);
		  int i = XVECLEN (i3pat, 0) - 1;
		  unsigned int regno = REGNO (XEXP (elt, 0));

		  do
		    {
		      rtx i3elt = XVECEXP (i3pat, 0, i);

		      if (GET_CODE (i3elt) == USE
			  && REG_P (XEXP (i3elt, 0))
			  && (REGNO (XEXP (i3elt, 0)) == regno
			      ? reg_set_between_p (XEXP (elt, 0),
						   PREV_INSN (insn), i3)
			      : regno >= FIRST_PSEUDO_REGISTER))
			return 0;
		    }
		  while (--i >= 0);
		}
	      break;

	      /* We can ignore CLOBBERs.  */
	    case CLOBBER:
	      break;

	    case SET:
	      /* Ignore SETs whose result isn't used but not those that
		 have side-effects.  */
	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
		  && insn_nothrow_p (insn)
		  && !side_effects_p (elt))
		break;

	      /* If we have already found a SET, this is a second one and
		 so we cannot combine with this insn.  */
	      if (set)
		return 0;

	      set = elt;
	      break;

	    default:
	      /* Anything else means we can't combine.  */
	      return 0;
	    }
	}

      if (set == 0
	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
	     so don't do anything with it.  */
	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
	return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;
  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);
  /* Do not eliminate a user-specified register if it is in an
     asm input, because we may break the register asm usage defined
     in the GCC manual if we allowed doing so.
     Be aware that this may cover more cases than we expect, but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;
1961 /* Don't eliminate a store in the stack pointer. */
1962 if (dest
== stack_pointer_rtx
1963 /* Don't combine with an insn that sets a register to itself if it has
1964 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1965 || (rtx_equal_p (src
, dest
) && find_reg_note (insn
, REG_EQUAL
, NULL_RTX
))
1966 /* Can't merge an ASM_OPERANDS. */
1967 || GET_CODE (src
) == ASM_OPERANDS
1968 /* Can't merge a function call. */
1969 || GET_CODE (src
) == CALL
1970 /* Don't eliminate a function call argument. */
1972 && (find_reg_fusage (i3
, USE
, dest
)
1974 && REGNO (dest
) < FIRST_PSEUDO_REGISTER
1975 && global_regs
[REGNO (dest
)])))
1976 /* Don't substitute into an incremented register. */
1977 || FIND_REG_INC_NOTE (i3
, dest
)
1978 || (succ
&& FIND_REG_INC_NOTE (succ
, dest
))
1979 || (succ2
&& FIND_REG_INC_NOTE (succ2
, dest
))
1980 /* Don't substitute into a non-local goto, this confuses CFG. */
1981 || (JUMP_P (i3
) && find_reg_note (i3
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
1982 /* Make sure that DEST is not used after SUCC but before I3. */
1985 && (reg_used_between_p (dest
, succ2
, i3
)
1986 || reg_used_between_p (dest
, succ
, succ2
)))
1987 || (!succ2
&& succ
&& reg_used_between_p (dest
, succ
, i3
))))
1988 /* Make sure that the value that is to be substituted for the register
1989 does not use any registers whose values alter in between. However,
1990 If the insns are adjacent, a use can't cross a set even though we
1991 think it might (this can happen for a sequence of insns each setting
1992 the same destination; last_set of that register might point to
1993 a NOTE). If INSN has a REG_EQUIV note, the register is always
1994 equivalent to the memory so the substitution is valid even if there
1995 are intervening stores. Also, don't move a volatile asm or
1996 UNSPEC_VOLATILE across any other insns. */
1999 || ! find_reg_note (insn
, REG_EQUIV
, src
))
2000 && use_crosses_set_p (src
, DF_INSN_LUID (insn
)))
2001 || (GET_CODE (src
) == ASM_OPERANDS
&& MEM_VOLATILE_P (src
))
2002 || GET_CODE (src
) == UNSPEC_VOLATILE
))
2003 /* Don't combine across a CALL_INSN, because that would possibly
2004 change whether the life span of some REGs crosses calls or not,
2005 and it is a pain to update that information.
2006 Exception: if source is a constant, moving it later can't hurt.
2007 Accept that as a special case. */
2008 || (DF_INSN_LUID (insn
) < last_call_luid
&& ! CONSTANT_P (src
)))
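  /* An illustrative example (not from the original sources) of the
     use_crosses_set_p rejection above:

         insn:  (set (reg 65) (plus (reg 64) (const_int 1)))
         ...    (set (reg 64) (const_int 0))
         i3:    (set (reg 66) (reg 65))

     Substituting (plus (reg 64) (const_int 1)) for (reg 65) in I3 would
     read (reg 64) after it has been overwritten, yielding a different
     value, so the combination must be rejected.  */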
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
         cases except for parameters, it is possible to have a register copy
         insn referencing a hard register that is not allowed to contain the
         mode being copied and which would not be valid as an operand of most
         insns.  Eliminate this problem by not combining with such an insn.

         Also, on some machines we don't want to extend the life of a hard
         register.  */

      if (REG_P (src)
          && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
               && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
              /* Don't extend the life of a hard register unless it is
                 user variable (if we have few registers) or it can't
                 fit into the desired register (meaning something special
                 is going on).
                 Also avoid substituting a return register into I3, because
                 reload can't handle a conflict with constraints of other
                 inputs.  */
              || (REGNO (src) < FIRST_PSEUDO_REGISTER
                  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
        return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;

  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
        {
          rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

          /* If the clobber represents an earlyclobber operand, we must not
             substitute an expression containing the clobbered register.
             As we do not analyze the constraint strings here, we have to
             make the conservative assumption.  However, if the register is
             a fixed hard reg, the clobber cannot represent any operand;
             we leave it up to the machine description to either accept or
             reject use-and-clobber patterns.  */
          if (!REG_P (reg)
              || REGNO (reg) >= FIRST_PSEUDO_REGISTER
              || !fixed_regs[REGNO (reg)])
            if (reg_overlap_mentioned_p (reg, src))
              return 0;
        }

  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
        return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
        return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
                  ? volatile_refs_p
                  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;

  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC
        && (JUMP_P (i3)
            || reg_used_between_p (XEXP (link, 0), insn, i3)
            || (pred != NULL_RTX
                && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
            || (pred2 != NULL_RTX
                && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
            || (succ != NULL_RTX
                && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
            || (succ2 != NULL_RTX
                && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
            || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
      return 0;
#endif

  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
          && ! all_adjacent)
        return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

         (set (reg:DI 101) (reg:DI 100))
         (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

         (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
                    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
                  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = x;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
             || GET_CODE (inner_dest) == SUBREG
             || GET_CODE (inner_dest) == ZERO_EXTRACT)
        inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
         above.  We don't want to prevent pseudos from being combined
         into the address of a MEM, so only prevent the combination if
         i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
           (!MEM_P (inner_dest)
            || rtx_equal_p (i2dest, inner_dest)
            || (i1dest && rtx_equal_p (i1dest, inner_dest))
            || (i0dest && rtx_equal_p (i0dest, inner_dest)))
           && (reg_overlap_mentioned_p (i2dest, inner_dest)
               || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
               || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

          /* This is the same test done in can_combine_p except we can't test
             all_adjacent; we don't have to, since this instruction will stay
             in place, thus we are not considering increasing the lifetime of
             INNER_DEST.

             Also, if this insn sets a function argument, combining it with
             something that might need a spill could clobber a previous
             function argument; the all_adjacent test in can_combine_p also
             checks this; here, we do a more specific test for this case.  */

          || (REG_P (inner_dest)
              && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
              && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
                                        GET_MODE (inner_dest))))
          || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
          || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
        return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
         record that for later.  We have to consider paradoxical
         subregs here, since they kill the whole register, but we
         ignore partial subregs, STRICT_LOW_PART, etc.
         Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
         STACK_POINTER_REGNUM, since these are always considered to be
         live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (subdest))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
        subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
          && REG_P (subdest)
          && reg_referenced_p (subdest, PATTERN (i3))
          && REGNO (subdest) != FRAME_POINTER_REGNUM
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
          && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
#endif
#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
          && (REGNO (subdest) != ARG_POINTER_REGNUM
              || ! fixed_regs[REGNO (subdest)])
#endif
          && REGNO (subdest) != STACK_POINTER_REGNUM)
        {
          if (*pi3dest_killed)
            return 0;

          *pi3dest_killed = subdest;
        }
    }

  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
        if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
                                i1_not_in_src, i0_not_in_src, pi3dest_killed))
          return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   or division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
                && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
    default:
      if (BINARY_P (x))
        return contains_muldiv (XEXP (x, 0))
            || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
        return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
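
/* For example (illustrative only), contains_muldiv returns 1 for
   (plus (mult (reg 64) (reg 65)) (reg 66)) but 0 for
   (plus (mult (reg 64) (const_int 4)) (reg 66)), since a multiplication
   by the power of two 4 will usually become a shift or be folded into
   an addressing mode.  */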
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (! INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
           && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
           && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
          || (HARD_REGISTER_P (dest)
              && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
              && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
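
/* A worked example of the mask arithmetic above (illustrative only):
   if INFO covers hard regs 10..13 (regno == 10, nregs == 4) and X is a
   DImode register occupying regs 12..13, then nregs == 2 gives
   new_mask == 0b11; shifting left by 12 - 10 == 2 gives 0b1100, and
   clearing those bits in info->mask records that regs 12 and 13 are
   written in full.  */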
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider to combine into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs--;
  do
    {
      if ((mask & 1 << nregs)
          && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
        return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
            && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
          && REG_N_SETS (regno) == 1
          && !added_sets
          && !REG_USERVAR_P (x));
}
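
/* For instance (illustrative only), a pseudo that is set exactly once,
   is not a user variable, and whose single set need not be kept around
   (!ADDED_SETS) can have its mode changed here; this is how the COMPARE
   handling in try_combine below switches a CC pseudo to the mode chosen
   by SELECT_CC_MODE.  */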
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
         && SUBREG_REG (x) == reg
         && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
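
/* For example (illustrative only), with REG == (reg:DI 64):
     (subreg:SI (reg:DI 64) 0)                    -> true
     (strict_low_part (subreg:SI (reg:DI 64) 0))  -> true
     (subreg:SI (reg:DI 65) 0)                    -> false (different reg)
     (subreg:SF (reg:DI 64) 0)                    -> false (not MODE_INT).  */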
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
        if (BARRIER_P (insn))
          {
            if (PREV_INSN (insn))
              SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
            else
              BB_FOOTER (bb) = NEXT_INSN (insn);
            if (NEXT_INSN (insn))
              SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
          }
        else if (LABEL_P (insn))
          break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
        || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
      return false;

  return true;
}
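
/* For example (illustrative only), with N == 2 this accepts
     (parallel [(set (reg:CC 17) (compare:CC (reg 64) (const_int 0)))
                (set (reg 65) (reg 64))
                (clobber (scratch:SI))])
   but rejects a PARALLEL whose second element is a CLOBBER followed by
   a SET, since all the SETs must come first.  */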
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
        return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
        if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
          return false;
    }

  return true;
}
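
/* The ordering test above matters because all SETs in a PARALLEL use the
   old register values.  In (illustrative example only)
     (parallel [(set (reg 64) (reg 65))
                (set (reg 66) (reg 64))])
   the second SET reads the value (reg 64) had *before* the first SET, so
   emitting the SETs one after the other would change the value that
   (reg 66) receives; reg_referenced_p detects this and we refuse.  */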
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that was
   passed as I3 to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
             int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;

  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;
  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
        return 0;

      for (i = 0; i < 4; i++)
        {
          rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
          rtx set = single_set (insn);
          rtx src;
          if (!set)
            continue;
          src = SET_SRC (set);
          if (CONSTANT_P (src))
            {
              ngood += 2;
              break;
            }
          else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
            ngood++;
          else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
                   || GET_CODE (src) == LSHIFTRT)
            nshift++;
        }

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
         are likely manipulating its value.  Ideally we'll be able to combine
         all four insns into a bitfield insertion of some kind.

         Note the source in I0 might be inside a sign/zero extension and the
         memory modes in I0 and I3 might be different.  So extract the address
         from the destination of I3 and search for it in the source of I0.

         In the event that there's a match but the source/dest do not actually
         refer to the same memory, the worst that happens is we try some
         combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
          /* Ensure the source of SET0 is a MEM, possibly buried inside
             an extension.  */
          && (GET_CODE (SET_SRC (set0)) == MEM
              || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
                   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
                  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
          && (set3 = single_set (i3))
          /* Ensure the destination of SET3 is a MEM.  */
          && GET_CODE (SET_DEST (set3)) == MEM
          /* Would it be better to extract the base address for the MEM
             in SET3 and look for that?  I don't have cases where it matters
             but I could envision such cases.  */
          && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
        ngood += 2;

      if (ngood < 2 && nshift < 2)
        return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
        fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
                 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
        fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
                 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
        fprintf (dump_file, "\nTrying %d -> %d:\n",
                 INSN_UID (i2), INSN_UID (i3));
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    temp_insn = i2, i2 = i0, i0 = temp_insn;
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    temp_insn = i1, i1 = i0, i0 = temp_insn;
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    temp_insn = i1, i1 = i2, i2 = temp_insn;

  added_links_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
         below would need to check what is inside (and reg_overlap_mentioned_p
         doesn't support those codes anyway).  Don't allow those destinations;
         the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
                                    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
         which we are going to substitute into one output of I2,
         is not used within another output of I2.  We must avoid making this:
         (parallel [(set (mem (reg 69)) ...)
                    (set (reg 69) ...)])
         which is not well-defined as to order of actions.
         (Besides, reload can't handle output reloads for this.)

         The problem can also happen if the dest of I3 is a memory ref,
         if another dest in I2 is an indirect memory ref.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
        if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
             || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
            && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
                                        SET_DEST (XVECEXP (p2, 0, i))))
          break;

      /* Make sure this PARALLEL is not an asm.  We do not allow combining
         that usually (see can_combine_p), so do not here either.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
        if (GET_CODE (XVECEXP (p2, 0, i)) == SET
            && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
          break;

      if (i == XVECLEN (p2, 0))
        for (i = 0; i < XVECLEN (p2, 0); i++)
          if (GET_CODE (XVECEXP (p2, 0, i)) == SET
              && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
            {
              combine_merges++;

              subst_insn = i3;
              subst_low_luid = DF_INSN_LUID (i2);

              added_sets_2 = added_sets_1 = added_sets_0 = 0;
              i2src = SET_SRC (XVECEXP (p2, 0, i));
              i2dest = SET_DEST (XVECEXP (p2, 0, i));
              i2dest_killed = dead_or_set_p (i2, i2dest);

              /* Replace the dest in I2 with our dest and make the resulting
                 insn the new pattern for I3.  Then skip to where we validate
                 the pattern.  Everything was set up above.  */
              SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
              newpat = p2;
              i3_subst_into_i2 = 1;
              goto validate_replacement;
            }
    }
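
  /* As an illustration of the transformation above (example only):

         i2:  (parallel [(set (reg 64) (div:SI (reg 60) (reg 61)))
                         (set (reg 65) (mod:SI (reg 60) (reg 61)))])
         i3:  (set (mem:SI (reg 70)) (reg 65))

     with (reg 65) dead in I3, the PARALLEL becomes the new pattern for I3:

              (parallel [(set (reg 64) (div:SI (reg 60) (reg 61)))
                         (set (mem:SI (reg 70)) (mod:SI (reg 60) (reg 61)))])

     storing the remainder directly into memory.  */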
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          if (CONST_INT_P (XEXP (dest, 1))
              && CONST_INT_P (XEXP (dest, 2)))
            {
              width = INTVAL (XEXP (dest, 1));
              offset = INTVAL (XEXP (dest, 2));
              dest = XEXP (dest, 0);
              if (BITS_BIG_ENDIAN)
                offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
            }
        }
      else
        {
          if (GET_CODE (dest) == STRICT_LOW_PART)
            dest = XEXP (dest, 0);
          width = GET_MODE_PRECISION (GET_MODE (dest));
          offset = 0;
        }

      if (offset >= 0)
        {
          /* If this is the low part, we're done.  */
          if (subreg_lowpart_p (dest))
            ;
          /* Handle the case where inner is twice the size of outer.  */
          else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
                   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
            offset += GET_MODE_PRECISION (GET_MODE (dest));
          /* Otherwise give up for now.  */
          else
            offset = -1;
        }

      if (offset >= 0)
        {
          rtx inner = SET_SRC (PATTERN (i3));
          rtx outer = SET_SRC (temp_expr);

          wide_int o
            = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp_expr))),
                          std::make_pair (inner, GET_MODE (dest)),
                          offset, width);

          combine_merges++;
          subst_insn = i3;
          subst_low_luid = DF_INSN_LUID (i2);
          added_sets_2 = added_sets_1 = added_sets_0 = 0;
          i2dest = SET_DEST (temp_expr);
          i2dest_killed = dead_or_set_p (i2, i2dest);

          /* Replace the source in I2 with the new constant and make the
             resulting insn the new pattern for I3.  Then skip to where we
             validate the pattern.  Everything was set up above.  */
          SUBST (SET_SRC (temp_expr),
                 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));

          newpat = PATTERN (i2);

          /* The dest of I3 has been replaced with the dest of I2.  */
          changed_i3_dest = 1;
          goto validate_replacement;
        }
    }
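
  /* A worked example of the wi::insert above (illustrative only):
     if I2 is (set (reg:SI 64) (const_int 0x12345678)) and I3 is
     (set (subreg:HI (reg:SI 64) 0) (const_int 0xabcd)) on a
     little-endian target, then offset == 0 and width == 16, so the
     merged constant is 0x1234abcd and the pair collapses to a single
     (set (reg:SI 64) (const_int 0x1234abcd)).  */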
  /* If we have no I1 and I2 looks like:
        (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
                   (set Y OP)])
     make up a dummy I1 that is
        (set Y OP)
     and change I2 to be
        (set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
          == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
                      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
             SET_DEST (PATTERN (i1)));
      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
                  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }

  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
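
  /* As an illustration of the first transformation above (example only):

         i2: (parallel [(set (reg:CC 17)
                             (compare:CC (plus (reg 64) (const_int -1))
                                         (const_int 0)))
                        (set (reg 64) (plus (reg 64) (const_int -1)))])

     becomes a fake i1 (set (reg 64) (plus (reg 64) (const_int -1))) and
     i2 (set (reg:CC 17) (compare:CC (reg 64) (const_int 0))), which a
     later try_combine can match against a branch-and-decrement
     pattern.  */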
  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
                                 &i1dest, &i1src))
      || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
                                 &i0dest, &i0src)))
    {
      undo_all ();
      return 0;
    }

  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
  i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
  i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
  i0dest_killed = i0 && dead_or_set_p (i0, i0dest);

  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
                          : (!reg_overlap_mentioned_p (i1dest, i0dest)
                             && reg_overlap_mentioned_p (i0dest, i2src))));

  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
                          i1 && i2dest_in_i1src && !i1_feeds_i2_n,
                          i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
                                 || (i1dest_in_i0src && !i0_feeds_i1_n)),
                          &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
          && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;

  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
         mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
        && REG_P (SET_SRC (PATTERN (i3)))
        && MEM_P (SET_DEST (PATTERN (i3)))
        && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
            || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
#ifdef AUTO_INC_DEC
    {
      rtx link;
      for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == REG_INC
            && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
                || (i1 != 0
                    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
          {
            undo_all ();
            return 0;
          }
    }
#endif
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
                     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
                     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
                     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                         && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;

  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
          || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
          || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }

  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
        i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
        i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
        i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
        i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
        i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
        i0pat = copy_rtx (PATTERN (i0));
    }
  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;

  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
          && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
                                            &cc_use_insn)))
        {
          compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
          compare_code = simplify_compare_const (compare_code,
                                                 GET_MODE (i2dest), op0, &op1);
          target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
        }

      /* Do the rest only if op1 is const0_rtx, which may be the
         result of simplification.  */
      if (op1 == const0_rtx)
        {
          /* If a single use of the CC is found, prepare to modify it
             when SELECT_CC_MODE returns a new CC-class mode, or when
             the above simplify_compare_const() returned a new comparison
             operator.  undobuf.other_insn is assigned the CC use insn
             when modifying it.  */
          if (cc_use_loc)
            {
#ifdef SELECT_CC_MODE
              machine_mode new_mode
                = SELECT_CC_MODE (compare_code, op0, op1);
              if (new_mode != orig_compare_mode
                  && can_change_dest_mode (SET_DEST (newpat),
                                           added_sets_2, new_mode))
                {
                  unsigned int regno = REGNO (newpat_dest);
                  compare_mode = new_mode;
                  if (regno < FIRST_PSEUDO_REGISTER)
                    newpat_dest = gen_rtx_REG (compare_mode, regno);
                  else
                    {
                      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                      newpat_dest = regno_reg_rtx[regno];
                    }
                }
#endif
              /* Cases for modifying the CC-using comparison.  */
              if (compare_code != orig_compare_code
                  /* ??? Do we need to verify the zero rtx?  */
                  && XEXP (*cc_use_loc, 1) == const0_rtx)
                {
                  /* Replace cc_use_loc with entire new RTX.  */
                  SUBST (*cc_use_loc,
                         gen_rtx_fmt_ee (compare_code, compare_mode,
                                         newpat_dest, const0_rtx));
                  undobuf.other_insn = cc_use_insn;
                }
              else if (compare_mode != orig_compare_mode)
                {
                  /* Just replace the CC reg with a new mode.  */
                  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
                  undobuf.other_insn = cc_use_insn;
                }
            }

          /* Now we modify the current newpat:
             First, SET_DEST(newpat) is updated if the CC mode has been
             altered.  For targets without SELECT_CC_MODE, this should be
             optimized away.  */
          if (compare_mode != orig_compare_mode)
            SUBST (SET_DEST (newpat), newpat_dest);
          /* This is always done to propagate i2src into newpat.  */
          SUBST (SET_SRC (newpat),
                 gen_rtx_COMPARE (compare_mode, op0, op1));
          /* Create new version of i2pat if needed; the below PARALLEL
             creation needs this to work correctly.  */
          if (! rtx_equal_p (i2src, op0))
            i2pat = gen_rtx_SET (i2dest, op0);
          i2_is_used = 1;
        }
    }
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
         an unneeded operation, such as a ZERO_EXTEND of something
         that is known to have the high part zero.  Handle that case
         by letting subst look at the inner insns.

         Another way to do this would be to have a function that tries
         to simplify a single insn instead of merging two or more
         insns.  We don't do this because of the potential of infinite
         loops and because of the potential extra memory required.
         However, doing it the way we are is a bit of a kludge and
         doesn't catch all cases.

         But only do this if -fexpensive-optimizations since it slows
         things down and doesn't usually win.

         This is not done in the COMPARE case above because the
         unmodified I2PAT is used in the PARALLEL and so a pattern
         with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
        {
          /* Pass pc_rtx so no substitutions are done, just
             simplifications.  */
          if (i1)
            {
              subst_low_luid = DF_INSN_LUID (i1);
              i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
            }

          subst_low_luid = DF_INSN_LUID (i2);
          i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
        }

      n_occurrences = 0;                /* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
         copy of I2SRC each time we substitute it, in order to avoid creating
         self-referential RTL when we will be substituting I1SRC for I1DEST
         later.  Likewise if I0 feeds into I2, either directly or indirectly
         through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
                      (i1_feeds_i2_n && i1dest_in_i1src)
                      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                          && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
         This happens if I1DEST is mentioned in I2 and dies there, and
         has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
           && i1_feeds_i2_n
           && dead_or_set_p (i2, i1dest)
           && !reg_overlap_mentioned_p (i1dest, newpat))
          /* Before we can do this substitution, we must redo the test done
             above (see detailed comments there) that ensures I1DEST isn't
             mentioned in any SETs in NEWPAT that are field assignments.  */
          || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
                                0, 0, 0))
        {
          undo_all ();
          return 0;
        }

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
         for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
        i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
         copy of I1SRC each time we substitute it, in order to avoid creating
         self-referential RTL when we will be substituting I0SRC for I0DEST
         later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
                      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
           && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
               || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
           && !reg_overlap_mentioned_p (i0dest, newpat))
          || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
                                0, 0, 0))
        {
          undo_all ();
          return 0;
        }

      /* If the following substitution will modify I0SRC, make a copy of it
         for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
        i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
                           || (i0_feeds_i2_n)))
        i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
          && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
              > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
          && (n_occurrences + added_sets_0
              + (added_sets_1 && i0_feeds_i1_n)
              + (added_sets_2 && i0_feeds_i2_n)
              > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
         at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
          && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      combine_extras += extra_sets;

      if (GET_CODE (newpat) == PARALLEL)
        {
          rtvec old = XVEC (newpat, 0);
          total_sets = XVECLEN (newpat, 0) + extra_sets;
          newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
          memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
                  sizeof (old->elem[0]) * old->num_elem);
        }
      else
        {
          rtx old = newpat;
          total_sets = 1 + extra_sets;
          newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
          XVECEXP (newpat, 0, 0) = old;
        }

      if (added_sets_0)
        XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
        {
          rtx t = i1pat;
          if (i0_feeds_i1_n)
            t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

          XVECEXP (newpat, 0, --total_sets) = t;
        }
      if (added_sets_2)
        {
          rtx t = i2pat;
          if (i1_feeds_i2_n)
            t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
                       i0_feeds_i1_n && i0dest_in_i0src);
          if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
            t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

          XVECEXP (newpat, 0, --total_sets) = t;
        }
    }
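
  /* For instance (illustrative only): if I2 was
     (set (reg 64) (plus (reg 65) (reg 66))) and (reg 64) is still live
     after I3, the combined pattern now has the form

         (parallel [(set (reg 67) <result of substitution>)
                    (set (reg 64) (plus (reg 65) (reg 66)))])

     and must be recognized as a whole by recog_for_combine below.  */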
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
        RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }

  /* We have recognized nothing yet.  */
  insn_code_number = -1;

  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
            && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
           || (GET_CODE (SET_DEST (set1)) == SUBREG
               && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
          && insn_nothrow_p (i3)
          && !side_effects_p (SET_SRC (set1)))
        {
          newpat = set0;
          insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
        }

      else if (((REG_P (SET_DEST (set0))
                 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
                || (GET_CODE (SET_DEST (set0)) == SUBREG
                    && find_reg_note (i3, REG_UNUSED,
                                      SUBREG_REG (SET_DEST (set0)))))
               && insn_nothrow_p (i3)
               && !side_effects_p (SET_SRC (set0)))
        {
          newpat = set1;
          insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

          if (insn_code_number >= 0)
            changed_i3_dest = 1;
        }

      if (insn_code_number < 0)
        newpat = oldpat;
    }

  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
         use I2DEST as a scratch register will help.  In the latter case,
         convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
         inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
         possible to try that as a scratch reg.  This would require adding
         more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
        {
          machine_mode new_mode = GET_MODE (SET_DEST (newpat));

          /* First try to split using the original register as a
             scratch register.  */
          parallel = gen_rtx_PARALLEL (VOIDmode,
                                       gen_rtvec (2, newpat,
                                                  gen_rtx_CLOBBER (VOIDmode,
                                                                   i2dest)));
          m_split_insn = combine_split_insns (parallel, i3);

          /* If that didn't work, try changing the mode of I2DEST if
             we can.  */
          if (m_split_insn == 0
              && new_mode != GET_MODE (i2dest)
              && new_mode != VOIDmode
              && can_change_dest_mode (i2dest, added_sets_2, new_mode))
            {
              machine_mode old_mode = GET_MODE (i2dest);
              rtx ni2dest;

              if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
                ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
              else
                {
                  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
                  ni2dest = regno_reg_rtx[REGNO (i2dest)];
                }

              parallel = (gen_rtx_PARALLEL
                          (VOIDmode,
                           gen_rtvec (2, newpat,
                                      gen_rtx_CLOBBER (VOIDmode,
                                                       ni2dest))));
              m_split_insn = combine_split_insns (parallel, i3);

              if (m_split_insn == 0
                  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
                {
                  struct undo *buf;

                  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
                  buf = undobuf.undos;
                  undobuf.undos = buf->next;
                  buf->next = undobuf.frees;
                  undobuf.frees = buf;
                }
            }

          i2scratch = m_split_insn != 0;
        }
      /* If recog_for_combine has discarded clobbers, try to use them
         again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
        {
          parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
          m_split_insn = combine_split_insns (parallel, i3);
        }

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
        {
          rtx m_split_pat = PATTERN (m_split_insn);
          insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
          if (insn_code_number >= 0)
            newpat = m_split_pat;
        }
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
               && (next_nonnote_nondebug_insn (i2) == i3
                   || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
        {
          rtx i2set, i3set;
          rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
          newi2pat = PATTERN (m_split_insn);

          i3set = single_set (NEXT_INSN (m_split_insn));
          i2set = single_set (m_split_insn);

          i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

          /* If I2 or I3 has multiple SETs, we won't know how to track
             register status, so don't use these insns.  If I2's destination
             is used between I2 and I3, we also can't use these insns.  */

          if (i2_code_number >= 0 && i2set && i3set
              && (next_nonnote_nondebug_insn (i2) == i3
                  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
            insn_code_number = recog_for_combine (&newi3pat, i3,
                                                  &new_i3_notes);
          if (insn_code_number >= 0)
            newpat = newi3pat;

          /* It is possible that both insns now set the destination of I3.
             If so, we must show an extra use of it.  */

          if (insn_code_number >= 0)
            {
              rtx new_i3_dest = SET_DEST (i3set);
              rtx new_i2_dest = SET_DEST (i2set);

              while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
                     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
                     || GET_CODE (new_i3_dest) == SUBREG)
                new_i3_dest = XEXP (new_i3_dest, 0);

              while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
                     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
                     || GET_CODE (new_i2_dest) == SUBREG)
                new_i2_dest = XEXP (new_i2_dest, 0);

              if (REG_P (new_i3_dest)
                  && REG_P (new_i2_dest)
                  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
                  && REGNO (new_i2_dest) < reg_n_sets_max)
                INC_REG_N_SETS (REGNO (new_i2_dest), 1);
            }
        }
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;
	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }
	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
#ifdef LOAD_EXTEND_OP
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
#endif
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }
	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}
      /* Check for a case where we loaded from memory in a narrow mode and
	 then sign extended it, but we need both registers.  In that case,
	 we have a PARALLEL with both loads from the same memory location.
	 We can split this into a load from memory followed by a
	 register-register copy.  This saves at least one insn, more if
	 register allocation can eliminate the copy.

	 We cannot do this if the destination of the first assignment is a
	 condition code register or cc0.  We eliminate this case by making
	 sure the SET_DEST and SET_SRC have the same mode.

	 We cannot do this if the destination of the second assignment is
	 a register that we have already assumed is zero-extended.  Similarly
	 for a SUBREG of such a register.  */

      else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	       && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
		   == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			       XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	       && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				       DF_INSN_LUID (i2))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode))))
	       && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		     && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
			 (REG_P (temp_expr)
			  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
			  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
			  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
			  && (reg_stat[REGNO (temp_expr)].nonzero_bits
			      != GET_MODE_MASK (word_mode)))))
	       && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					     SET_SRC (XVECEXP (newpat, 0, 1)))
	       && ! find_reg_note (i3, REG_UNUSED,
				   SET_DEST (XVECEXP (newpat, 0, 0))))
	{
	  rtx ni2dest;

	  newi2pat = XVECEXP (newpat, 0, 0);
	  ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
	  newpat = XVECEXP (newpat, 0, 1);
	  SUBST (SET_SRC (newpat),
		 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    swap_i2i3 = 1;
	}
      /* Similarly, check for a case where we have a PARALLEL of two independent
	 SETs but we started with three insns.  In this case, we can do the sets
	 as two separate insns.  This case occurs when some SET allows two
	 other insns to combine, but the destination of that SET is still live.

	 Also do this if we started with two insns and (at least) one of the
	 resulting sets is a noop; this noop will be deleted later.  */

      else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
		      || set_noop_p (XVECEXP (newpat, 0, 1)))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				      XVECEXP (newpat, 0, 0))
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				      XVECEXP (newpat, 0, 1))
	       && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		     && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
	{
	  rtx set0 = XVECEXP (newpat, 0, 0);
	  rtx set1 = XVECEXP (newpat, 0, 1);

	  /* Normally, it doesn't matter which of the two is done first,
	     but the one that references cc0 can't be the second, and
	     one which uses any regs/memory set in between i2 and i3 can't
	     be first.  The PARALLEL might also have been pre-existing in i3,
	     so we need to make sure that we won't wrongly hoist a SET to i2
	     that would conflict with a death note present in there.  */
	  if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	      && !(REG_P (SET_DEST (set1))
		   && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	      && !(GET_CODE (SET_DEST (set1)) == SUBREG
		   && find_reg_note (i2, REG_DEAD,
				     SUBREG_REG (SET_DEST (set1))))
	      && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	      /* If I3 is a jump, ensure that set0 is a jump so that
		 we do not create invalid RTL.  */
	      && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	    {
	      newi2pat = set1;
	      newpat = set0;
	    }
	  else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
		   && !(REG_P (SET_DEST (set0))
			&& find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
		   && !(GET_CODE (SET_DEST (set0)) == SUBREG
			&& find_reg_note (i2, REG_DEAD,
					  SUBREG_REG (SET_DEST (set0))))
		   && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
		   /* If I3 is a jump, ensure that set1 is a jump so that
		      we do not create invalid RTL.  */
		   && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	    {
	      newi2pat = set0;
	      newpat = set1;
	    }
	  else
	    {
	      undo_all ();
	      return 0;
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    {
	      /* recog_for_combine might have added CLOBBERs to newi2pat.
		 Make sure NEWPAT does not depend on the clobbered regs.  */
	      if (GET_CODE (newi2pat) == PARALLEL)
		{
		  for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		    if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		      {
			rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
			if (reg_overlap_mentioned_p (reg, newpat))
			  {
			    undo_all ();
			    return 0;
			  }
		      }
		}

	      insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	    }
	}
      /* If it still isn't recognized, fail and change things back the way they
	 were.  */
      if ((insn_code_number < 0
	   /* Is the result a reasonable ASM_OPERANDS?  */
	   && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
	{
	  undo_all ();
	  return 0;
	}
      /* If we had to change another insn, make sure it is valid also.  */
      if (undobuf.other_insn)
	{
	  CLEAR_HARD_REG_SET (newpat_used_regs);

	  other_pat = PATTERN (undobuf.other_insn);
	  other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
						 &new_other_notes);

	  if (other_code_number < 0 && ! check_asm_operands (other_pat))
	    {
	      undo_all ();
	      return 0;
	    }
	}
      /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
	 they are adjacent to each other or not.  */
      if (HAVE_cc0)
	{
	  rtx_insn *p = prev_nonnote_insn (i3);
	  if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	      && sets_cc0_p (newi2pat))
	    {
	      undo_all ();
	      return 0;
	    }
	}
      /* Only allow this combination if insn_rtx_costs reports that the
	 replacement instructions are cheaper than the originals.  */
      if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
	{
	  undo_all ();
	  return 0;
	}
      if (MAY_HAVE_DEBUG_INSNS)
	{
	  struct undo *undo;

	  for (undo = undobuf.undos; undo; undo = undo->next)
	    if (undo->kind == UNDO_MODE)
	      {
		rtx reg = *undo->where.r;
		machine_mode new_mode = GET_MODE (reg);
		machine_mode old_mode = undo->old_contents.m;

		/* Temporarily revert mode back.  */
		adjust_reg_mode (reg, old_mode);

		if (reg == i2dest && i2scratch)
		  {
		    /* If we used i2dest as a scratch register with a
		       different mode, substitute it for the original
		       i2src while its original mode is temporarily
		       restored, and then clear i2scratch so that we don't
		       do it again later.  */
		    propagate_for_debug (i2, last_combined_insn, reg, i2src,
					 this_basic_block);
		    i2scratch = false;
		    /* Put back the new mode.  */
		    adjust_reg_mode (reg, new_mode);
		  }
		else
		  {
		    rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		    rtx_insn *first, *last;

		    if (reg == i2dest)
		      {
			first = i2;
			last = last_combined_insn;
		      }
		    else
		      {
			first = i3;
			last = undobuf.other_insn;
			gcc_assert (last);
			if (DF_INSN_LUID (last)
			    < DF_INSN_LUID (last_combined_insn))
			  last = last_combined_insn;
		      }

		    /* We're dealing with a reg that changed mode but not
		       meaning, so we want to turn it into a subreg for
		       the new mode.  However, because of REG sharing and
		       because its mode had already changed, we have to do
		       it in two steps.  First, replace any debug uses of
		       reg, with its original mode temporarily restored,
		       with this copy we have created; then, replace the
		       copy with the SUBREG of the original shared reg,
		       once again changed to the new mode.  */
		    propagate_for_debug (first, last, reg, tempreg,
					 this_basic_block);
		    adjust_reg_mode (reg, new_mode);
		    propagate_for_debug (first, last, tempreg,
					 lowpart_subreg (old_mode, reg,
							 new_mode),
					 this_basic_block);
		  }
	      }
	}
      /* If we will be able to accept this, we have made a
	 change to the destination of I3.  This requires us to
	 do a few adjustments.  */

      if (changed_i3_dest)
	{
	  PATTERN (i3) = newpat;
	  adjust_for_new_dest (i3);
	}
      /* We now know that we can do this combination.  Merge the insns and
	 update the status of registers and LOG_LINKS.  */

      if (undobuf.other_insn)
	{
	  rtx note, next;

	  PATTERN (undobuf.other_insn) = other_pat;

	  /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	     ensure that they are still valid.  Then add any non-duplicate
	     notes added by recog_for_combine.  */
	  for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	    {
	      next = XEXP (note, 1);

	      if ((REG_NOTE_KIND (note) == REG_DEAD
		   && !reg_referenced_p (XEXP (note, 0),
					 PATTERN (undobuf.other_insn)))
		  || (REG_NOTE_KIND (note) == REG_UNUSED
		      && !reg_set_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn))))
		remove_note (undobuf.other_insn, note);
	    }

	  distribute_notes (new_other_notes, undobuf.other_insn,
			    undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			    NULL_RTX);
	}
      if (swap_i2i3)
	{
	  rtx_insn *insn;
	  struct insn_link *link;
	  rtx ni2dest;

	  /* I3 now uses what used to be its destination and which is now
	     I2's destination.  This requires us to do a few adjustments.  */
	  PATTERN (i3) = newpat;
	  adjust_for_new_dest (i3);

	  /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	     so we still will.

	     However, some later insn might be using I2's dest and have
	     a LOG_LINK pointing at I3.  We must remove this link.
	     The simplest way to remove the link is to point it at I1,
	     which we know will be a NOTE.  */

	  /* newi2pat is usually a SET here; however, recog_for_combine might
	     have added some clobbers.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
	  else
	    ni2dest = SET_DEST (newi2pat);

	  for (insn = NEXT_INSN (i3);
	       insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			|| insn != BB_HEAD (this_basic_block->next_bb));
	       insn = NEXT_INSN (insn))
	    if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
	      {
		FOR_EACH_LOG_LINK (link, insn)
		  if (link->insn == i3)
		    link->insn = i1;

		break;
	      }
	}
      {
	rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
	struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
	rtx midnotes = 0;
	int from_luid;
	/* Compute which registers we expect to eliminate.  newi2pat may be
	   setting either i3dest or i2dest, so we must check it.  */
	rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		       || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		       || !i2dest_killed
		       ? 0 : i2dest);
	/* For i1, we need to compute both local elimination and global
	   elimination information with respect to newi2pat because i1dest
	   may be the same as i3dest, in which case newi2pat may be setting
	   i1dest.  Global information is used when distributing REG_DEAD
	   note for i2 and i3, in which case it does matter if newi2pat sets
	   i1dest or not.

	   Local information is used when distributing REG_DEAD note for i1,
	   in which case it doesn't matter if newi2pat sets i1dest or not.
	   See PR62151, if we have four insns combination:

	       i0: r0 <- i0src
	       i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	       i2: r0 <- i2src (using r1)
	       i3: r3 <- i3src (using r0)

	   From i1's point of view, r0 is eliminated, no matter if it is set
	   by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
	   should be discarded.

	   Note local information only affects cases in forms like "I1->I2->I3",
	   "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
	   "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
	   i0dest anyway.  */
	rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			     || !i1dest_killed
			     ? 0 : i1dest);
	rtx elim_i1 = (local_elim_i1 == 0
		       || (newi2pat && reg_set_p (i1dest, newi2pat))
		       ? 0 : i1dest);
	/* Same case as i1.  */
	rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			     ? 0 : i0dest);
	rtx elim_i0 = (local_elim_i0 == 0
		       || (newi2pat && reg_set_p (i0dest, newi2pat))
		       ? 0 : i0dest);

	/* Get the old REG_NOTES and LOG_LINKS from all our insns and
	   clear them.  */
	i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
	i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
	if (i1)
	  i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
	if (i0)
	  i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
	/* Ensure that we do not have something that should not be shared but
	   occurs multiple times in the new insns.  Check this by first
	   resetting all the `used' flags and then copying anything that is
	   shared.  */

	reset_used_flags (i3notes);
	reset_used_flags (i2notes);
	reset_used_flags (i1notes);
	reset_used_flags (i0notes);
	reset_used_flags (newpat);
	reset_used_flags (newi2pat);
	if (undobuf.other_insn)
	  reset_used_flags (PATTERN (undobuf.other_insn));

	i3notes = copy_rtx_if_shared (i3notes);
	i2notes = copy_rtx_if_shared (i2notes);
	i1notes = copy_rtx_if_shared (i1notes);
	i0notes = copy_rtx_if_shared (i0notes);
	newpat = copy_rtx_if_shared (newpat);
	newi2pat = copy_rtx_if_shared (newi2pat);
	if (undobuf.other_insn)
	  reset_used_flags (PATTERN (undobuf.other_insn));
	INSN_CODE (i3) = insn_code_number;
	PATTERN (i3) = newpat;

	if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
	  {
	    rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);

	    reset_used_flags (call_usage);
	    call_usage = copy_rtx (call_usage);

	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		replace_rtx (call_usage, i2dest, i2src);
	      }

	    if (substed_i1)
	      replace_rtx (call_usage, i1dest, i1src);
	    if (substed_i0)
	      replace_rtx (call_usage, i0dest, i0src);

	    CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
	  }
	if (undobuf.other_insn)
	  INSN_CODE (undobuf.other_insn) = other_code_number;

	/* We had one special case above where I2 had more than one set and
	   we replaced a destination of one of those sets with the destination
	   of I3.  In that case, we have to update LOG_LINKS of insns later
	   in this basic block.  Note that this (expensive) case is rare.

	   Also, in this case, we must pretend that all REG_NOTEs for I2
	   actually came from I3, so that REG_UNUSED notes from I2 will be
	   properly handled.  */

	if (i3_subst_into_i2)
	  {
	    for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	      if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
		   || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
		  && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
		  && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
		  && ! find_reg_note (i2, REG_UNUSED,
				      SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
		for (temp_insn = NEXT_INSN (i2);
		     temp_insn
		     && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			 || BB_HEAD (this_basic_block) != temp_insn);
		     temp_insn = NEXT_INSN (temp_insn))
		  if (temp_insn != i3 && INSN_P (temp_insn))
		    FOR_EACH_LOG_LINK (link, temp_insn)
		      if (link->insn == i2)
			link->insn = i3;

	    if (i3notes)
	      {
		rtx link = i3notes;
		while (XEXP (link, 1))
		  link = XEXP (link, 1);
		XEXP (link, 1) = i2notes;
	      }
	    else
	      i3notes = i2notes;
	    i2notes = 0;
	  }

	LOG_LINKS (i3) = NULL;
	REG_NOTES (i3) = 0;
	LOG_LINKS (i2) = NULL;
	REG_NOTES (i2) = 0;
	if (newi2pat)
	  {
	    if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	      propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
				   this_basic_block);
	    INSN_CODE (i2) = i2_code_number;
	    PATTERN (i2) = newi2pat;
	  }
	else
	  {
	    if (MAY_HAVE_DEBUG_INSNS && i2src)
	      propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
				   this_basic_block);
	    SET_INSN_DELETED (i2);
	  }

	if (i1)
	  {
	    LOG_LINKS (i1) = NULL;
	    REG_NOTES (i1) = 0;
	    if (MAY_HAVE_DEBUG_INSNS)
	      propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
				   this_basic_block);
	    SET_INSN_DELETED (i1);
	  }

	if (i0)
	  {
	    LOG_LINKS (i0) = NULL;
	    REG_NOTES (i0) = 0;
	    if (MAY_HAVE_DEBUG_INSNS)
	      propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
				   this_basic_block);
	    SET_INSN_DELETED (i0);
	  }
	/* Get death notes for everything that is now used in either I3 or
	   I2 and used to die in a previous insn.  If we built two new
	   patterns, move from I1 to I2 then I2 to I3 so that we get the
	   proper movement on registers that I2 modifies.  */

	if (i0)
	  from_luid = DF_INSN_LUID (i0);
	else if (i1)
	  from_luid = DF_INSN_LUID (i1);
	else
	  from_luid = DF_INSN_LUID (i2);
	if (newi2pat)
	  move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
	move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

	/* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
	if (i3notes)
	  distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
	if (i2notes)
	  distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
	if (i1notes)
	  distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			    elim_i2, local_elim_i1, local_elim_i0);
	if (i0notes)
	  distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, local_elim_i0);
	if (midnotes)
	  distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
	/* Distribute any notes added to I2 or I3 by recog_for_combine.  We
	   know these are REG_UNUSED and want them to go to the desired insn,
	   so we always pass it as i3.  */

	if (newi2pat && new_i2_notes)
	  distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			    NULL_RTX);

	if (new_i3_notes)
	  distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			    NULL_RTX);
	/* If I3DEST was used in I3SRC, it really died in I3.  We may need to
	   put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
	   I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
	   in that case, it might delete I2.  Similarly for I2 and I1.
	   Show an additional death due to the REG_DEAD note we make here.  If
	   we discard it in distribute_notes, we will decrement it again.  */

	if (i3dest_killed)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	    if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, elim_i2,
				elim_i1, elim_i0);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				elim_i2, elim_i1, elim_i0);
	  }

	if (i2dest_in_i2src)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	    if (newi2pat && reg_set_p (i2dest, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
				NULL_RTX, NULL_RTX);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  }

	if (i1dest_in_i1src)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	    if (newi2pat && reg_set_p (i1dest, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
				NULL_RTX, NULL_RTX);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  }

	if (i0dest_in_i0src)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	    if (newi2pat && reg_set_p (i0dest, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
				NULL_RTX, NULL_RTX);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  }
	distribute_links (i3links);
	distribute_links (i2links);
	distribute_links (i1links);
	distribute_links (i0links);
	if (REG_P (i2dest))
	  {
	    struct insn_link *link;
	    rtx_insn *i2_insn = 0;
	    rtx i2_val = 0, set;

	    /* The insn that used to set this register doesn't exist, and
	       this life of the register may not exist either.  See if one of
	       I3's links points to an insn that sets I2DEST.  If it does,
	       that is now the last known value for I2DEST.  If we don't update
	       this and I2 set the register to a value that depended on its old
	       contents, we will get confused.  If this insn is used, things
	       will be set correctly in combine_instructions.  */
	    FOR_EACH_LOG_LINK (link, i3)
	      if ((set = single_set (link->insn)) != 0
		  && rtx_equal_p (i2dest, SET_DEST (set)))
		i2_insn = link->insn, i2_val = SET_SRC (set);

	    record_value_for_reg (i2dest, i2_insn, i2_val);

	    /* If the reg formerly set in I2 died only once and that was in I3,
	       zero its use count so it won't make `reload' do any work.  */
	    if (! added_sets_2
		&& (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
		&& ! i2dest_in_i2src
		&& REGNO (i2dest) < reg_n_sets_max)
	      INC_REG_N_SETS (REGNO (i2dest), -1);
	  }
	if (i1 && REG_P (i1dest))
	  {
	    struct insn_link *link;
	    rtx_insn *i1_insn = 0;
	    rtx i1_val = 0, set;

	    FOR_EACH_LOG_LINK (link, i3)
	      if ((set = single_set (link->insn)) != 0
		  && rtx_equal_p (i1dest, SET_DEST (set)))
		i1_insn = link->insn, i1_val = SET_SRC (set);

	    record_value_for_reg (i1dest, i1_insn, i1_val);

	    if (! added_sets_1
		&& ! i1dest_in_i1src
		&& REGNO (i1dest) < reg_n_sets_max)
	      INC_REG_N_SETS (REGNO (i1dest), -1);
	  }
	if (i0 && REG_P (i0dest))
	  {
	    struct insn_link *link;
	    rtx_insn *i0_insn = 0;
	    rtx i0_val = 0, set;

	    FOR_EACH_LOG_LINK (link, i3)
	      if ((set = single_set (link->insn)) != 0
		  && rtx_equal_p (i0dest, SET_DEST (set)))
		i0_insn = link->insn, i0_val = SET_SRC (set);

	    record_value_for_reg (i0dest, i0_insn, i0_val);

	    if (! added_sets_0
		&& ! i0dest_in_i0src
		&& REGNO (i0dest) < reg_n_sets_max)
	      INC_REG_N_SETS (REGNO (i0dest), -1);
	  }
	/* Update reg_stat[].nonzero_bits et al for any changes that may have
	   been made to this insn.  The order is important, because newi2pat
	   can affect nonzero_bits of newpat.  */
	if (newi2pat)
	  note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
	note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
      }
  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }
  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}
/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (GET_CODE (XEXP (x, 0)) == CONST
	  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
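      /* For example, (mem (symbol_ref "foo")) becomes
	 (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo"))),
	 and the address of the HIGH subexpression is returned as the
	 split point.  (Illustrative symbol; this matches targets that
	 form addresses with HIGH/LO_SUM pairs.)  */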
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}
      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;

    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);
      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;
      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  machine_mode mode = GET_MODE (dest);
	  unsigned HOST_WIDE_INT mask
	    = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, mode,
					  simplify_gen_binary (AND, mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
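	  /* For example, (set D (and R (const_int 4096))) where D is used
	     only in a comparison against zero can be replaced by a one-bit
	     ZERO_EXTRACT of R at bit 12, since exact_log2 (4096) == 12 >= 7.
	     (Illustrative constant; any single-bit mask of at least 1<<7
	     qualifies.)  */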
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;

	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (GET_MODE (inner));
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0
	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
	{
	  machine_mode mode = GET_MODE (SET_SRC (x));

	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= ((unsigned HOST_WIDE_INT) 1 << len) - 1;
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_PRECISION (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_PRECISION (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
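      /* For example, extracting an 8-bit field at bit 4 of a 32-bit value
	 with two shifts gives
	 (ashiftrt (ashift X (const_int 20)) (const_int 24)),
	 since 32 - 8 - 4 == 20 and 32 - 8 == 24; the unsigned case uses
	 LSHIFTRT instead.  (Illustrative widths.)  */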
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;

    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}
      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;

    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}
      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || GET_CODE (XEXP (x, 0)) == ASHIFT))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* ... fall through ...  */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* ... fall through ...  */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

  /* Two expressions are equal if they are identical copies of a shared
     RTX or if they are both registers with the same register number
     and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
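/* For example, two distinct rtxes both denoting (reg:SI 42) compare equal
   under COMBINE_RTX_EQUAL_P, whereas (reg:SI 42) and (reg:HI 42) do not,
   since their modes differ.  */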
  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;
  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  */
  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));
      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
		      && (!HAVE_cc0
			  || ! (code == SET && i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG || code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx, GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx temp;
  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;
	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }
  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
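  /* Two illustrative cases (not exhaustive): in ((a + 3) + 5) the inner
     constant associates with the outer one to give (a + 8), and in
     ((a & b) & a) the second attempt below simplifies (a & a) to a,
     giving (b & a).  */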
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
          || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
        {
          rtx other = XEXP (XEXP (x, 0), 0);
          rtx inner_op0 = XEXP (XEXP (x, 0), 1);
          rtx inner_op1 = XEXP (x, 1);
          rtx inner;

          /* Make sure we pass the constant operand if any as the second
             one if this is a commutative operation.  */
          if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
            {
              rtx tem = inner_op0;
              inner_op0 = inner_op1;
              inner_op1 = tem;
            }
          inner = simplify_binary_operation (code == MINUS ? PLUS
                                             : code == DIV ? MULT
                                             : code,
                                             mode, inner_op0, inner_op1);

          /* For commutative operations, try the other pair if that one
             didn't simplify.  */
          if (inner == 0 && COMMUTATIVE_ARITH_P (x))
            {
              other = XEXP (XEXP (x, 0), 1);
              inner = simplify_binary_operation (code, mode,
                                                 XEXP (XEXP (x, 0), 0),
                                                 inner_op1);
            }

          if (inner)
            return simplify_gen_binary (code, mode, other, inner);
        }
    }
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
         address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;
    case SUBREG:
      if (op0_mode == VOIDmode)
        op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
          && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
             /* Don't call gen_lowpart if the inner mode
                is VOIDmode and we cannot simplify it, as SUBREG without
                inner mode is invalid.  */
          && (GET_MODE (SUBREG_REG (x)) != VOIDmode
              || gen_lowpart_common (mode, SUBREG_REG (x))))
        return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
        break;
      {
        rtx temp;
        temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
                                SUBREG_BYTE (x));
        if (temp)
          return temp;

        /* If op is known to have all lower bits zero, the result is zero.  */
        if (!in_dest
            && SCALAR_INT_MODE_P (mode)
            && SCALAR_INT_MODE_P (op0_mode)
            && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
            && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
            && HWI_COMPUTABLE_MODE_P (op0_mode)
            && (nonzero_bits (SUBREG_REG (x), op0_mode)
                & GET_MODE_MASK (mode)) == 0)
          return CONST0_RTX (mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
         of the address.  */
      if (MEM_P (SUBREG_REG (x))
          && (MEM_VOLATILE_P (SUBREG_REG (x))
              || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
                                           MEM_ADDR_SPACE (SUBREG_REG (x)))))
        return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
         we might have been counting on using the fact that some bits were
         zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));
      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
         replaced by (lshiftrt X C).  This will convert
         (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
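      /* A worked example: in SImode, (ashiftrt X 31) evaluates to 0 or -1,
         so its negation evaluates to 0 or 1, which is exactly
         (lshiftrt X 31).  */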
      if (GET_CODE (temp) == ASHIFTRT
          && CONST_INT_P (XEXP (temp, 1))
          && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
                                     INTVAL (XEXP (temp, 1)));
      /* If X has only a single bit that might be nonzero, say, bit I, convert
         (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
         MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
         (sign_extract X 1 Y).  But only do this if TEMP isn't a register
         or a SUBREG of one since we'd be making the expression more
         complex if it was just a register.  */
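      /* A worked example: in QImode, if X can only be 0 or 8 (bit I = 3),
         then (neg X) is 0 or -8.  (ashift X 4) gives 0 or 0x80, and the
         arithmetic shift right by 4 smears the sign bit back down,
         giving 0 or 0xf8 = -8.  */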
      if (!REG_P (temp)
          && ! (GET_CODE (temp) == SUBREG
                && REG_P (SUBREG_REG (temp)))
          && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
        {
          rtx temp1 = simplify_shift_const
            (NULL_RTX, ASHIFTRT, mode,
             simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
                                   GET_MODE_PRECISION (mode) - 1 - i),
             GET_MODE_PRECISION (mode) - 1 - i);

          /* If all we did was surround TEMP with the two shifts, we
             haven't improved anything, so don't use it.  Otherwise,
             we are better off with TEMP1.  */
          if (GET_CODE (temp1) != ASHIFTRT
              || GET_CODE (XEXP (temp1, 0)) != ASHIFT
              || XEXP (XEXP (temp1, 0), 0) != temp)
            return temp1;
        }
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      if (HWI_COMPUTABLE_MODE_P (mode))
        SUBST (XEXP (x, 0),
               force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
                              GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
         whose value is a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
          && (temp = get_last_value (XEXP (x, 0)))
          && COMPARISON_P (temp))
        return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
         returning the inner CONST since CONST can be shared with a
         REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
        SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;
    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
         can add in an offset.  find_split_point will split this address up
         again if it doesn't match.  */
      if (GET_CODE (XEXP (x, 0)) == HIGH
          && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
        return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
         when c is (const_int (pow2 + 1) / 2) is a sign extension of a
         bit-field and can be replaced by either a sign_extend or a
         sign_extract.  The `and' may be a zero_extend and the two
         <c>, -<c> constants may be reversed.  */
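      /* A worked instance of the pattern: with pow2 = 256 (so i + 1 = 8),
         (plus (xor (and x 255) 128) -128) sign-extends the low byte of x:
         0..127 map to themselves and 128..255 map to -128..-1.  In SImode
         the replacement below is (ashiftrt (ashift x 24) 24).  */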
      if (GET_CODE (XEXP (x, 0)) == XOR
          && CONST_INT_P (XEXP (x, 1))
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
          && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
              || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
               && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
               && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
                   == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
              || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
                  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
                      == (unsigned int) i + 1))))
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, mode,
           simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                 XEXP (XEXP (XEXP (x, 0), 0), 0),
                                 GET_MODE_PRECISION (mode) - (i + 1)),
           GET_MODE_PRECISION (mode) - (i + 1));
      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
         can become (ashiftrt (ashift (xor x 1) C) C) where C is
         the bitsize of the mode - 1.  This allows simplification of
         "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
          && !REG_P (XEXP (x, 0))
          && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                && REG_P (SUBREG_REG (XEXP (x, 0))))
          && nonzero_bits (XEXP (x, 0), mode) == 1)
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, mode,
           simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
                                 GET_MODE_PRECISION (mode) - 1),
           GET_MODE_PRECISION (mode) - 1);
      /* If we are adding two things that have no bits in common, convert
         the addition into an IOR.  This will often be further simplified,
         for example in cases like ((a & 1) + (a & 2)), which can
         become a & 3.  */
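      /* The case cited above in full: (a & 1) and (a & 2) have disjoint
         nonzero bits, so their sum equals their inclusive or, and
         (ior (and a 1) (and a 2)) then folds to (and a 3).  */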
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (XEXP (x, 0), mode)
              & nonzero_bits (XEXP (x, 1), mode)) == 0)
        {
          /* Try to simplify the expression further.  */
          rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
          temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

          /* If we could, great.  If not, do not go ahead with the IOR
             replacement, since PLUS appears in many special purpose
             address arithmetic instructions.  */
          if (GET_CODE (temp) != CLOBBER
              && (GET_CODE (temp) != IOR
                  || ((XEXP (temp, 0) != XEXP (x, 0)
                       || XEXP (temp, 1) != XEXP (x, 1))
                      && (XEXP (temp, 0) != XEXP (x, 1)
                          || XEXP (temp, 1) != XEXP (x, 0)))))
            return temp;
        }
      break;
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
         (and <foo> (const_int pow2-1))  */
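      /* For example, (minus foo (and foo (const_int -8))) becomes
         (and foo (const_int 7)): the AND clears the low three bits of
         FOO, so subtracting it from FOO leaves exactly those bits.  */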
      if (GET_CODE (XEXP (x, 1)) == AND
          && CONST_INT_P (XEXP (XEXP (x, 1), 1))
          && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
          && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
        return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
                                       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  This
         occurs mostly in addresses, often when unrolling loops.  */
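      /* For example, (mult (plus A B) (const_int 4)) distributes to
         (plus (mult A 4) (mult B 4)); when A or B carries another constant
         factor or term, the inverse distributive law can then collapse the
         result, which is common for unrolled address arithmetic such as
         (base + i) * 4.  */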
      if (GET_CODE (XEXP (x, 0)) == PLUS)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      /* Try to simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
          && GET_CODE (XEXP (x, 0)) == DIV)
        {
          rtx tem = simplify_binary_operation (MULT, mode,
                                               XEXP (XEXP (x, 0), 0),
                                               XEXP (x, 1));
          if (tem)
            return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
        }
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
         its first operand is a shift.  */
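      /* For example, (udiv (lshiftrt X 3) (const_int 4)) is rewritten as
         (lshiftrt (lshiftrt X 3) 2), which simplify_shift_const merges
         into (lshiftrt X 5).  */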
      if (CONST_INT_P (XEXP (x, 1))
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
          && (GET_CODE (XEXP (x, 0)) == ASHIFT
              || GET_CODE (XEXP (x, 0)) == LSHIFTRT
              || GET_CODE (XEXP (x, 0)) == ASHIFTRT
              || GET_CODE (XEXP (x, 0)) == ROTATE
              || GET_CODE (XEXP (x, 0)) == ROTATERT))
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
                                     XEXP (x, 0), i);
      break;
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
         with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
          || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
              && ! CC0_P (XEXP (x, 0))))
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          enum rtx_code new_code;

          if (GET_CODE (op0) == COMPARE)
            op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

          /* Simplify our comparison, if possible.  */
          new_code = simplify_comparison (code, &op0, &op1);
          /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
             if only the low-order bit is possibly nonzero in X (such as when
             X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
             (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
             known to be either 0 or -1, NE becomes a NEG and EQ becomes
             (plus X 1).

             Remove any ZERO_EXTRACT we made when thinking this was a
             comparison.  It may now be simpler to use, e.g., an AND.  If a
             ZERO_EXTRACT is indeed appropriate, it will be placed back by
             the call to make_compound_operation in the SET case.

             Don't apply these optimizations if the caller would
             prefer a comparison rather than a value.
             E.g., for the condition in an IF_THEN_ELSE most targets need
             an explicit comparison.  */
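          /* Concretely, with STORE_FLAG_VALUE == 1: if X is known to be
             0 or 1, (ne X 0) is X itself and (eq X 0) is (xor X 1); if X
             is known to be 0 or -1, (ne X 0) is (neg X) and (eq X 0) is
             (plus X 1).  The chain of branches below implements these
             four cases and their STORE_FLAG_VALUE == -1 duals.  */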
          if (in_cond)
            ;

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            return gen_lowpart (mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_binary (XOR, mode,
                                          gen_lowpart (mode, op0),
                                          const1_rtx);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (mode, gen_lowpart (mode, op0), 1);
            }

          /* If STORE_FLAG_VALUE is -1, we have cases similar to
             those above.  */

          if (in_cond)
            ;

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            return gen_lowpart (mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NOT, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          /* If X is 0/1, (eq X 0) is X-1.  */
          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (mode, gen_lowpart (mode, op0), -1);
            }
          /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
             one bit that might be nonzero, we can convert (ne x 0) to
             (ashift x c) where C puts the bit in the sign bit.  Remove any
             AND with STORE_FLAG_VALUE when we are done, since we are only
             going to test the sign bit.  */
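          /* A worked example: if STORE_FLAG_VALUE is the SImode sign bit
             and X can only be 0 or 8 (bit 3), then (ashift X 28) moves
             that bit into the sign position, so the result tests "true"
             exactly when X is nonzero.  */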
          if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
              && HWI_COMPUTABLE_MODE_P (mode)
              && val_signbit_p (mode, STORE_FLAG_VALUE)
              && op1 == const0_rtx
              && mode == GET_MODE (op0)
              && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
            {
              x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                        expand_compound_operation (op0),
                                        GET_MODE_PRECISION (mode) - 1 - i);
              if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
                return XEXP (x, 0);
              else
                return x;
            }

          /* If the code changed, return a whole new comparison.
             We also need to avoid using SUBST in cases where
             simplify_comparison has widened a comparison with a CONST_INT,
             since in that case the wider CONST_INT may fail the sanity
             checks in do_SUBST.  */
          if (new_code != code
              || (CONST_INT_P (op1)
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
            return gen_rtx_fmt_ee (new_code, mode, op0, op1);

          /* Otherwise, keep this operation, but maybe change its operands.
             This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
          SUBST (XEXP (x, 0), op0);
          SUBST (XEXP (x, 1), op1);
        }
      break;
    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
        return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
        return simplify_shift_const (x, code, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
        SUBST (XEXP (x, 1),
               force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
                              ((unsigned HOST_WIDE_INT) 1
                               << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
                              - 1,
                              0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
                                    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;
  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
          != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
        {
          swapped = 1, true_code = EQ, false_code = NE;
          std::swap (true_rtx, false_rtx);
        }

      /* If we are comparing against zero and the expression being tested has
         only a single bit that might be nonzero, that is its value when it is
         not equal to zero.  Similarly if it is known to be -1 or 0.  */

      if (true_code == EQ && true_val == const0_rtx
          && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
        {
          false_code = EQ;
          false_val = gen_int_mode (nzb, GET_MODE (from));
        }
      else if (true_code == EQ && true_val == const0_rtx
               && (num_sign_bit_copies (from, GET_MODE (from))
                   == GET_MODE_PRECISION (GET_MODE (from))))
        {
          false_code = EQ;
          false_val = constm1_rtx;
        }

      /* Now simplify an arm if we know the value of the register in the
         branch and it is used in the arm.  Be careful due to the potential
         of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
        true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
                                      from, true_val),
                          pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
        false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
                                       from, false_val),
                           pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */
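  /* For example, (if_then_else (eq A B) (pc) (label_ref L)) is rewritten
     as (if_then_else (ne A B) (label_ref L) (pc)), so a target needs
     branch patterns for only one sense of each comparison.  */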
  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
          || (CONSTANT_P (true_rtx)
              && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
          || true_rtx == const0_rtx
          || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
          || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
              && !OBJECT_P (false_rtx))
          || reg_mentioned_p (true_rtx, false_rtx)
          || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
           && !HONOR_NANS (mode)
           && rtx_equal_p (XEXP (cond, 0), true_rtx)
           && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
        return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
        return
          simplify_gen_unary (NEG, mode,
                              simplify_gen_unary (ABS, mode, true_rtx, mode),
                              mode);
      default:
        break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
        return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
        return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
        return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
        return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
        break;
      }
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).

     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 nor -1, but it isn't worth checking for.  */
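  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A 0) (plus Z (const_int 5)) Z) can be rewritten as
     (plus Z (mult (ne A 0) (const_int 5))): the store-flag value is 1 or
     0, so the multiply contributes 5 or 0, and PLUS with 0 is an
     identity.  */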
  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && GET_MODE_CLASS (mode) == MODE_INT
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      machine_mode m = mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
           || GET_CODE (t) == IOR || GET_CODE (t) == XOR
           || GET_CODE (t) == ASHIFT
           || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
          && rtx_equal_p (XEXP (t, 0), f))
        c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
         would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
                || GET_CODE (t) == XOR)
               && rtx_equal_p (XEXP (t, 1), f))
        c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (mode)
                      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == SIGN_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (mode)
                      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }

      if (z)
        {
          temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
                                                 cond_op0, cond_op1),
                        pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (MULT, m, temp,
                                      simplify_gen_binary (MULT, m, c1,
                                                           const_true_rtx));
          temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

          if (extend_op != UNKNOWN)
            temp = simplify_gen_unary (extend_op, mode, temp, m);

          return temp;
        }
    }
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */
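  /* For example, if A is known to be 0 or 1,
     (if_then_else (ne A 0) (const_int 8) (const_int 0)) is simply
     (ashift A 3).  */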
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), mode)
           && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
          || ((num_sign_bit_copies (XEXP (cond, 0), mode)
               == GET_MODE_PRECISION (mode))
              && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, mode,
                            gen_lowpart (mode, XEXP (cond, 0)), i);

  /* (if_then_else (ne REG 0) (8) (0)) is REG when nonzero_bits (REG) == 8.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && GET_MODE (XEXP (cond, 0)) == mode
      && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
         == nonzero_bits (XEXP (cond, 0), mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
    return XEXP (cond, 0);

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
      SUBST (SET_SRC (x), src);
    }
  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
        {
          op0 = XEXP (src, 0), op1 = XEXP (src, 1);
          if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
            {
              inner_compare = op0;
              op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
            }
        }
      else
        op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
                                           op0, op1);
      if (!tmp)
        new_code = old_code;
      else if (!CONSTANT_P (tmp))
        {
          new_code = GET_CODE (tmp);
          op0 = XEXP (tmp, 0);
          op1 = XEXP (tmp, 1);
        }
      else
        {
          rtx pat = PATTERN (other_insn);
          undobuf.other_insn = other_insn;
          SUBST (*cc_use, tmp);

          /* Attempt to simplify CC user.  */
          if (GET_CODE (pat) == SET)
            {
              rtx new_rtx = simplify_rtx (SET_SRC (pat));
              if (new_rtx != NULL_RTX)
                SUBST (SET_SRC (pat), new_rtx);
            }

          /* Convert X into a no-op move.  */
          SUBST (SET_DEST (x), pc_rtx);
          SUBST (SET_SRC (x), pc_rtx);
          return x;
        }

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
         need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
        compare_mode = GET_MODE (op0);
      else if (inner_compare
               && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
               && new_code == old_code
               && op0 == XEXP (inner_compare, 0)
               && op1 == XEXP (inner_compare, 1))
        compare_mode = GET_MODE (inner_compare);
      else
        compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
         compare, and the mode in the place SET_DEST is used.  If SET_DEST is
         a hard register, just build new versions with the proper mode.  If it
         is a pseudo, we lose unless this is the only time we set the pseudo,
         in which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
        {
          if (can_change_dest_mode (dest, 0, compare_mode))
            {
              unsigned int regno = REGNO (dest);
              rtx new_dest;

              if (regno < FIRST_PSEUDO_REGISTER)
                new_dest = gen_rtx_REG (compare_mode, regno);
              else
                {
                  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                  new_dest = regno_reg_rtx[regno];
                }

              SUBST (SET_DEST (x), new_dest);
              SUBST (XEXP (*cc_use, 0), new_dest);
              other_changed = 1;

              dest = new_dest;
            }
        }
#endif  /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
         undobuf.other_insn.  */
      if (new_code != old_code)
        {
          int other_changed_previously = other_changed;
          unsigned HOST_WIDE_INT mask;
          rtx old_cc_use = *cc_use;

          SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
                                          dest, const0_rtx));
          other_changed = 1;

          /* If the only change we made was to change an EQ into an NE or
             vice versa, OP0 has only one bit that might be nonzero, and OP1
             is zero, check if changing the user of the condition code will
             produce a valid insn.  If it won't, we can keep the original code
             in that insn by surrounding our operation with an XOR.  */

          if (((old_code == NE && new_code == EQ)
               || (old_code == EQ && new_code == NE))
              && ! other_changed_previously && op1 == const0_rtx
              && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
              && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
            {
              rtx pat = PATTERN (other_insn), note = 0;

              if ((recog_for_combine (&pat, other_insn, &note) < 0
                   && ! check_asm_operands (pat)))
                {
                  *cc_use = old_cc_use;
                  other_changed = 0;

                  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
                                             gen_int_mode (mask,
                                                           GET_MODE (op0)));
                }
            }
        }

      if (other_changed)
        undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
        {
          SUBST (SET_SRC (x), op0);
          src = SET_SRC (x);
        }
      /* Otherwise, if we didn't previously have the same COMPARE we
         want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
               || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
        {
          SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
          src = SET_SRC (x);
        }
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
         compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits
     will be undefined.  On machines where it is defined, this transformation
     is safe as long as M1 and M2 have the same number of words.  */
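  /* For example, (set x:SI (subreg:SI (plus:DI a b) 0)) can become
     (set (subreg:DI x) (plus:DI a b)): only the low part of the PLUS is
     live, so the operation may as well be performed in the wider mode.  */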
  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
           / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
               + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
#ifndef WORD_REGISTER_OPERATIONS
      && (GET_MODE_SIZE (GET_MODE (src))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
            && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
                                         GET_MODE (SUBREG_REG (src)),
                                         GET_MODE (src)))
#endif
      && (REG_P (dest)
          || (GET_CODE (dest) == SUBREG
              && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
             gen_lowpart (GET_MODE (SUBREG_REG (src)),
                          dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_PRECISION (GET_MODE (src))
          < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
                                     nonzero_bits (inner, inner_mode)))
        {
          SUBST (SET_SRC (x), inner);
          src = SET_SRC (x);
        }
    }

#ifdef LOAD_EXTEND_OP
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
      && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
      && SUBREG_BYTE (src) == 0
      && paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src)))
    {
      SUBST (SET_SRC (x),
             gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
                            GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
#endif
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */
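  /* For example, if A is known to be 0 or -1,
     (if_then_else (ne A 0) T F) equals (ior (and A T) (and (not A) F)):
     TERM2 below selects T when A is -1, TERM3 selects F when A is 0, and
     TERM1 carries any value shared by both arms in the IOR cases.  */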
  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
#ifdef HAVE_conditional_move
      && ! can_conditionally_move_p (GET_MODE (src))
#endif
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
                               GET_MODE (XEXP (XEXP (src, 0), 0)))
          == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
                      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
                       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
          && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
               && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, GET_MODE (src),
                                   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, GET_MODE (src),
                                   simplify_gen_unary (NOT, GET_MODE (src),
                                                       XEXP (XEXP (src, 0), 0),
                                                       GET_MODE (src)),
                                   false_rtx);

      SUBST (SET_SRC (x),
             simplify_gen_binary (IOR, GET_MODE (src),
                                  simplify_gen_binary (IOR, GET_MODE (src),
                                                       term1, term2),
                                  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
         any (sign) bits when converting INTVAL (op1) to
         "unsigned HOST_WIDE_INT".  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0))
        {
          x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
          if (GET_CODE (x) != AND)
            return x;

          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);
        }

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
         apply the distributive law and then the inverse distributive
         law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      if (GET_CODE (op1) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
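/* For example, expand_compound_operation rewrites
   (zero_extend:SI (reg:QI R)) as an AND of the low part with 255, and
   (sign_extract:SI X (const_int 8) (const_int 0)) as the shift pair
   (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24));
   make_compound_operation performs the reverse rewrites.  */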
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
         it depends on implicitly extending the value.
         Since we don't know the right way to extend it,
         we can't tell whether the implicit way is right.

         Even for a mode that is no wider than a const_int,
         we can't win, because we need to sign extend one of its bits through
         the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
         (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
         because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
         reloaded.  If not for that, MEM's would very rarely be safe.

         Reject MODEs bigger than a word, because we might not be able
         to reference a two-register group starting with an arbitrary register
         (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
        return x;

      /* Reject MODEs that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
        return x;

      len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
      /* If the inner object has VOIDmode (the only way this can happen
         is if it is an ASM_OPERANDS), we can't do anything since we don't
         know how much masking to do.  */
      if (len == 0)
        return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* ... fall through ...  */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
        return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
          || !CONST_INT_P (XEXP (x, 2))
          || GET_MODE (XEXP (x, 0)) == VOIDmode)
        return x;

      /* Reject MODEs that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
        return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
        return x;

      if (BITS_BIG_ENDIAN)
        pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;

      break;

    default:
      return x;
    }

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
               & ~(((unsigned HOST_WIDE_INT)
                    GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
                   >> 1))
              == 0)))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, optimize_this_for_speed_p)
          > set_src_cost (temp2, optimize_this_for_speed_p))
        return temp2;
      else if (set_src_cost (x, optimize_this_for_speed_p)
               > set_src_cost (temp, optimize_this_for_speed_p))
        return temp;
      else
        return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
         know that the last value didn't have any inappropriate bits
         set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
          && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
          && subreg_lowpart_p (XEXP (x, 0))
          && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
         is a comparison and STORE_FLAG_VALUE permits.  This is like
         the first case, but it works even when GET_MODE (x) is larger
         than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
          && COMPARISON_P (XEXP (XEXP (x, 0), 0))
          && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              <= HOST_BITS_PER_WIDE_INT)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
          && subreg_lowpart_p (XEXP (x, 0))
          && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
          && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              <= HOST_BITS_PER_WIDE_INT)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return SUBREG_REG (XEXP (x, 0));
    }
  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */
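  /* A worked example: a signed 8-bit field at bit 4 of an SImode value
     uses an inner left shift of 32 - 4 - 8 = 20 bits and an outer
     arithmetic right shift of 32 - 8 = 24 bits, leaving bits 4..11
     sign-extended in the low byte of the result.  */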
  modewidth = GET_MODE_PRECISION (GET_MODE (x));
  if (modewidth >= pos + len)
    {
      machine_mode mode = GET_MODE (x);
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
        return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
                                  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
                                  simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                        GET_MODE (x),
                                                        XEXP (x, 0), pos),
                                  ((unsigned HOST_WIDE_INT) 1 << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
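/* For example, storing an 8-bit value V at bit position 8 of R becomes

     (set R (ior (and R (not (ashift (const_int 255) (const_int 8))))
                 (ashift (and V (const_int 255)) (const_int 8))))

   i.e. clear the field in R, then mask V and shift it into place.  */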
static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  machine_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
          && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
        {
          inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
          len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
          pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
        }
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
               && CONST_INT_P (XEXP (SET_DEST (x), 1)))
        {
          inner = XEXP (SET_DEST (x), 0);
          len = INTVAL (XEXP (SET_DEST (x), 1));
          pos = XEXP (SET_DEST (x), 2);

          /* A constant position should stay within the width of INNER.  */
          if (CONST_INT_P (pos)
              && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
            break;

          if (BITS_BIG_ENDIAN)
            {
              if (CONST_INT_P (pos))
                pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
                               - INTVAL (pos));
              else if (GET_CODE (pos) == MINUS
                       && CONST_INT_P (XEXP (pos, 1))
                       && (INTVAL (XEXP (pos, 1))
                           == GET_MODE_PRECISION (GET_MODE (inner)) - len))
                /* If position is ADJUST - X, new position is X.  */
                pos = XEXP (pos, 0);
              else
                {
                  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
                  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
                                             gen_int_mode (prec - len,
                                                           GET_MODE (pos)),
                                             pos);
                }
            }
        }

      /* A SUBREG between two modes that occupy the same numbers of words
         can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
               /* We need SUBREGs to compute nonzero_bits properly.  */
               && nonzero_sign_valid
               && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
                     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
                        + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
        {
          x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
                           gen_lowpart
                           (GET_MODE (SUBREG_REG (SET_DEST (x))),
                            SET_SRC (x)));
          continue;
        }
      else
        break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
        inner = SUBREG_REG (inner);

      compute_mode = GET_MODE (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (! SCALAR_INT_MODE_P (compute_mode))
        {
          machine_mode imode;

          /* Don't do anything for vector or complex integral types.  */
          if (! FLOAT_MODE_P (compute_mode))
            break;

          /* Try to find an integral mode to pun with.  */
          imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
          if (imode == BLKmode)
            break;

          compute_mode = imode;
          inner = gen_lowpart (imode, inner);
        }

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
        break;

      /* Now compute the equivalent expression.  Make a copy of INNER
         for the SET_DEST in case it is a MEM into which we will substitute;
         we don't want shared RTL in that case.  */
      mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << len) - 1,
                           compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
                                     simplify_gen_unary (NOT, compute_mode,
                                       simplify_gen_binary (ASHIFT,
                                                            compute_mode,
                                                            mask, pos),
                                       compute_mode),
                                     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
                                    simplify_gen_binary (
                                      AND, compute_mode,
                                      gen_lowpart (compute_mode, SET_SRC (x)),
                                      mask),
                                    pos);

      x = gen_rtx_SET (copy_rtx (inner),
                       simplify_gen_binary (IOR, compute_mode,
                                            cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */
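/* For example, (zero_extract:SI X (const_int 8) (const_int 4)) denotes
   bits 4 through 11 of X, zero-extended to SImode.  The code below first
   tries to express such a reference as a plain MEM, SUBREG, or extension
   before falling back to the target's extv/extzv/insv patterns.  */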
7320 make_extraction (machine_mode mode
, rtx inner
, HOST_WIDE_INT pos
,
7321 rtx pos_rtx
, unsigned HOST_WIDE_INT len
, int unsignedp
,
7322 int in_dest
, int in_compare
)
7324 /* This mode describes the size of the storage area
7325 to fetch the overall value from. Within that, we
7326 ignore the POS lowest bits, etc. */
7327 machine_mode is_mode
= GET_MODE (inner
);
7328 machine_mode inner_mode
;
7329 machine_mode wanted_inner_mode
;
7330 machine_mode wanted_inner_reg_mode
= word_mode
;
7331 machine_mode pos_mode
= word_mode
;
7332 machine_mode extraction_mode
= word_mode
;
7333 machine_mode tmode
= mode_for_size (len
, MODE_INT
, 1);
7335 rtx orig_pos_rtx
= pos_rtx
;
7336 HOST_WIDE_INT orig_pos
;
7338 if (pos_rtx
&& CONST_INT_P (pos_rtx
))
7339 pos
= INTVAL (pos_rtx
), pos_rtx
= 0;
7341 if (GET_CODE (inner
) == SUBREG
&& subreg_lowpart_p (inner
))
7343 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7344 consider just the QI as the memory to extract from.
7345 The subreg adds or removes high bits; its mode is
7346 irrelevant to the meaning of this extraction,
7347 since POS and LEN count from the lsb. */
7348 if (MEM_P (SUBREG_REG (inner
)))
7349 is_mode
= GET_MODE (SUBREG_REG (inner
));
7350 inner
= SUBREG_REG (inner
);
7352 else if (GET_CODE (inner
) == ASHIFT
7353 && CONST_INT_P (XEXP (inner
, 1))
7354 && pos_rtx
== 0 && pos
== 0
7355 && len
> UINTVAL (XEXP (inner
, 1)))
7357 /* We're extracting the least significant bits of an rtx
7358 (ashift X (const_int C)), where LEN > C. Extract the
7359 least significant (LEN - C) bits of X, giving an rtx
7360 whose mode is MODE, then shift it left C times. */
7361 new_rtx
= make_extraction (mode
, XEXP (inner
, 0),
7362 0, 0, len
- INTVAL (XEXP (inner
, 1)),
7363 unsignedp
, in_dest
, in_compare
);
7365 return gen_rtx_ASHIFT (mode
, new_rtx
, XEXP (inner
, 1));
7367 else if (GET_CODE (inner
) == TRUNCATE
)
7368 inner
= XEXP (inner
, 0);
7370 inner_mode
= GET_MODE (inner
);
7372 /* See if this can be done without an extraction. We never can if the
7373 width of the field is not the same as that of some integer mode. For
7374 registers, we can only avoid the extraction if the position is at the
7375 low-order bit and this is either not in the destination or we have the
7376 appropriate STRICT_LOW_PART operation available.
7378 For MEM, we can avoid an extract if the field starts on an appropriate
7379 boundary and we can change the mode of the memory reference. */
7381 if (tmode
!= BLKmode
7382 && ((pos_rtx
== 0 && (pos
% BITS_PER_WORD
) == 0
7384 && (inner_mode
== tmode
7386 || TRULY_NOOP_TRUNCATION_MODES_P (tmode
, inner_mode
)
7387 || reg_truncated_to_mode (tmode
, inner
))
7390 && have_insn_for (STRICT_LOW_PART
, tmode
))))
7391 || (MEM_P (inner
) && pos_rtx
== 0
7393 % (STRICT_ALIGNMENT
? GET_MODE_ALIGNMENT (tmode
)
7394 : BITS_PER_UNIT
)) == 0
7395 /* We can't do this if we are widening INNER_MODE (it
7396 may not be aligned, for one thing). */
7397 && GET_MODE_PRECISION (inner_mode
) >= GET_MODE_PRECISION (tmode
)
7398 && (inner_mode
== tmode
7399 || (! mode_dependent_address_p (XEXP (inner
, 0),
7400 MEM_ADDR_SPACE (inner
))
7401 && ! MEM_VOLATILE_P (inner
))))))
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
         field.  If the original and current mode are the same, we need not
         adjust the offset.  Otherwise, we do if bytes big endian.

         If INNER is not a MEM, get a piece consisting of just the field
         of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
        {
          HOST_WIDE_INT offset;

          /* POS counts from lsb, but make OFFSET count in memory order.  */
          if (BYTES_BIG_ENDIAN)
            offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
          else
            offset = pos / BITS_PER_UNIT;

          new_rtx = adjust_address_nv (inner, tmode, offset);
        }
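      /* A worked instance: for the 8-bit field at POS 8 of an SImode MEM
         with TMODE == QImode, OFFSET is 1 if !BYTES_BIG_ENDIAN and
         (32 - 8 - 8) / 8 == 2 if BYTES_BIG_ENDIAN.  */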
      else if (REG_P (inner))
        {
          if (tmode != inner_mode)
            {
              /* We can't call gen_lowpart in a DEST since we
                 always want a SUBREG (see below) and it would sometimes
                 return a new hard register.  */
              if (pos || in_dest)
                {
                  HOST_WIDE_INT final_word = pos / BITS_PER_WORD;

                  if (WORDS_BIG_ENDIAN
                      && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
                    final_word = ((GET_MODE_SIZE (inner_mode)
                                   - GET_MODE_SIZE (tmode))
                                  / UNITS_PER_WORD) - final_word;

                  final_word *= UNITS_PER_WORD;
                  if (BYTES_BIG_ENDIAN
                      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
                    final_word += (GET_MODE_SIZE (inner_mode)
                                   - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;

                  /* Avoid creating invalid subregs, for example when
                     simplifying (x>>32)&255.  */
                  if (!validate_subreg (tmode, inner_mode, inner, final_word))
                    return NULL_RTX;

                  new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
                }
              else
                new_rtx = gen_lowpart (tmode, inner);
            }
          else
            new_rtx = inner;
        }
      else
        new_rtx = force_to_mode (inner, tmode,
                                 len >= HOST_BITS_PER_WIDE_INT
                                 ? ~(unsigned HOST_WIDE_INT) 0
                                 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
                                 0);

      /* If this extraction is going into the destination of a SET,
         make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
        return (MEM_P (new_rtx) ? new_rtx
                : (GET_CODE (new_rtx) != SUBREG
                   ? gen_rtx_CLOBBER (tmode, const0_rtx)
                   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
      if (mode == tmode)
        return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
        return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                                         mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert the extraction to the cheaper of
         sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (tmode)
              && ((nonzero_bits (new_rtx, tmode)
                   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1))
                  == 0)))
        {
          rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
          rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

          /* Prefer ZERO_EXTENSION, since it gives more information to
             the backend.  */
          if (set_src_cost (temp, optimize_this_for_speed_p)
              <= set_src_cost (temp1, optimize_this_for_speed_p))
            return temp;
          return temp1;
        }

      /* Otherwise, sign- or zero-extend unless we already are in the
         proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                             mode, new_rtx));
    }
  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* If INNER is a MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
          || (pos_rtx != 0 && len != 1)))
    return 0;
  enum extraction_pattern pattern = (in_dest ? EP_insv
                                     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  if (get_best_reg_extraction_insn (&insn, pattern,
                                    GET_MODE_BITSIZE (inner_mode), mode))
    {
      wanted_inner_reg_mode = insn.struct_mode;
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }
  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
    extraction_mode = mode;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
         natural alignment of the memory.  */
      wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
             > GET_MODE_BITSIZE (wanted_inner_mode))
        {
          wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
          gcc_assert (wanted_inner_mode != VOIDmode);
        }
    }
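  /* E.g. for LEN 8 at POS 12: QImode would span a boundary
     (12 % 8 + 8 > 8), as would HImode (12 % 16 + 8 > 16), so the loop
     above settles on SImode.  */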
  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
         BITS_BIG_ENDIAN style.  If position is constant, compute new
         position.  Otherwise, build subtraction.
         Note that POS is relative to the mode of the original argument.
         If it's a MEM we need to recompute POS relative to that.
         However, if we're extracting from (or inserting into) a register,
         we want to recompute POS relative to wanted_inner_mode.  */
      int width = (MEM_P (inner)
                   ? GET_MODE_BITSIZE (is_mode)
                   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
        pos = width - len - pos;
      else
        pos_rtx
          = gen_rtx_MINUS (GET_MODE (pos_rtx),
                           gen_int_mode (width - len, GET_MODE (pos_rtx)),
                           pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
         Note that it can only be less than 0 if !MEM_P (inner).  */
    }
  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
         endian in both bits and bytes or little endian in bits and bytes.
         If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
         adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
          && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
        offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
                * GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
          && is_mode != wanted_inner_mode)
        offset = (GET_MODE_SIZE (is_mode)
                  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }
  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
         the register unless TRULY_NOOP_TRUNCATION.  */
      if (in_dest
          && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
                                             wanted_inner_mode))
        return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
          && (pos_rtx != 0
              || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
        return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
                             pos_rtx
                             || len + orig_pos >= HOST_BITS_PER_WIDE_INT
                             ? ~(unsigned HOST_WIDE_INT) 0
                             : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
                                << orig_pos),
                             0);
    }
  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.  */
  if (pos_rtx != 0
      && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
                                     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert extraction to cheaper one - either
         SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
         cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
              && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
                   & ~(((unsigned HOST_WIDE_INT)
                        GET_MODE_MASK (GET_MODE (pos_rtx)))
                       >> 1))
                  == 0)))
        {
          rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
                                          GET_MODE (pos_rtx));

          /* Prefer ZERO_EXTENSION, since it gives more information to
             the backend.  */
          if (set_src_cost (temp1, optimize_this_for_speed_p)
              < set_src_cost (temp, optimize_this_for_speed_p))
            temp = temp1;
        }
      pos_rtx = temp;
    }
  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
                             extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */
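/* For instance, with COUNT == 2,
   (plus:SI (ashift:SI X (const_int 5)) (const_int 12))
   commutes to (plus:SI (ashift:SI X (const_int 3)) (const_int 3)):
   the shift absorbs two bits of its count and the constant is shifted
   right by two.  */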
static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
         either the value being shifted if the shift count is equal to
         COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= count)
        return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
        return simplify_gen_unary (code, mode, tem, mode);

      break;
    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
         make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
          && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
        {
          HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
          return simplify_gen_binary (code, mode, tem,
                                      gen_int_mode (val, mode));
        }
      break;

    default:
      break;
    }

  return 0;
}
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE.  */
static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  enum rtx_code next_code;
  int i, j;
  rtx new_rtx = 0;
  rtx tem;
  const char *fmt;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
               : ((code == COMPARE || COMPARISON_P (x))
                  && XEXP (x, 1) == const0_rtx) ? COMPARE
               : in_code == COMPARE ? SET : in_code);
  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
         an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
          HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;

          new_rtx = make_compound_operation (XEXP (x, 0), next_code);
          if (GET_CODE (new_rtx) == NEG)
            {
              new_rtx = XEXP (new_rtx, 0);
              multval = -multval;
            }
          multval = trunc_int_for_mode (multval, mode);
          new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
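          /* E.g. inside an address, (ashift:SI X (const_int 2)) becomes
             (mult:SI X (const_int 4)), the canonical form within a MEM.  */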
        }
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
          && SCALAR_INT_MODE_P (mode))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
                                     XEXP (lhs, 1));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
        }
      else if (GET_CODE (lhs) == MULT
               && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (lhs, 1),
                                                         mode));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
          goto maybe_swap;
        }
      x = gen_lowpart (mode, new_rtx);
      goto maybe_swap;
    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
          && SCALAR_INT_MODE_P (mode))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
                                     XEXP (rhs, 1));
          new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else if (GET_CODE (rhs) == MULT
               && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (rhs, 1),
                                                         mode));
          new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
          return x;
        }
      return gen_lowpart (mode, new_rtx);
    case AND:
      /* If the second operand is not a constant, we can't do anything
         with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
        break;

      /* If the constant is a power of two minus one and the first operand
         is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
                                     i, 1, 0, in_code == COMPARE);
        }
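      /* E.g. (and:SI (lshiftrt:SI X (const_int 3)) (const_int 7)):
         7 + 1 is a power of two, so I == 3 and the result is
         (zero_extract:SI X (const_int 3) (const_int 3)).  */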
      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
               && subreg_lowpart_p (XEXP (x, 0))
               && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
                                             next_code);
          new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))),
                                     new_rtx, 0,
                                     XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
                                     0, in_code == COMPARE);
        }
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
                || GET_CODE (XEXP (x, 0)) == IOR)
               && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
               && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          /* Apply the distributive law, and then try to make extractions.  */
          new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
                                                 XEXP (x, 1)),
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
                                                 XEXP (x, 1)));
          new_rtx = make_compound_operation (new_rtx, in_code);
        }
      /* If we have (and (rotate X C) M) and C is larger than the number
         of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
               && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     (GET_MODE_PRECISION (mode)
                                      - INTVAL (XEXP (XEXP (x, 0), 1))),
                                     NULL_RTX, i, 1, 0, in_code == COMPARE);
        }
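      /* E.g. (and:SI (rotate:SI X (const_int 28)) (const_int 15)) picks up
         the four bits the rotate moved to the bottom:
         (zero_extract:SI X (const_int 4) (const_int 4)).  */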
      /* On machines without logical shifts, if the operand of the AND is
         a logical shift and our mask turns off all the propagated sign
         bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
               && !have_insn_for (LSHIFTRT, mode)
               && have_insn_for (ASHIFTRT, mode)
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
               && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
               && mode_width <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

          mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
          if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
            SUBST (XEXP (x, 0),
                   gen_rtx_ASHIFTRT (mode,
                                     make_compound_operation
                                     (XEXP (XEXP (x, 0), 0), next_code),
                                     XEXP (XEXP (x, 0), 1)));
        }
      /* If the constant is one less than a power of two, this might be
         representable by an extraction even if no shift is present.
         If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
         we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
         convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   i, NULL_RTX, 1, 1, 0, 1);

      break;
    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
         arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
          && ! have_insn_for (LSHIFTRT, mode)
          && mode_width <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
        {
          new_rtx = gen_rtx_ASHIFTRT (mode,
                                      make_compound_operation (XEXP (x, 0),
                                                               next_code),
                                      XEXP (x, 1));
          break;
        }

      /* ... fall through ...  */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
         this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
          && GET_CODE (lhs) == ASHIFT
          && CONST_INT_P (XEXP (lhs, 1))
          && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
          && INTVAL (XEXP (lhs, 1)) >= 0
          && INTVAL (rhs) < mode_width)
        {
          new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
                                     NULL_RTX, mode_width - INTVAL (rhs),
                                     code == LSHIFTRT, 0, in_code == COMPARE);
          break;
        }
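      /* E.g. (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
         is the sign extension of X's low byte, i.e.
         (sign_extract:SI X (const_int 8) (const_int 0)).  */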
      /* See if we have operations between an ASHIFTRT and an ASHIFT.
         If so, try to merge the shifts into a SIGN_EXTEND.  We could
         also do this for some cases of SIGN_EXTRACT, but it doesn't
         seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
          && ! (GET_CODE (lhs) == SUBREG
                && (OBJECT_P (SUBREG_REG (lhs))))
          && CONST_INT_P (rhs)
          && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
          && INTVAL (rhs) < mode_width
          && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
        new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
                                                                  next_code),
                                   0, NULL_RTX, mode_width - INTVAL (rhs),
                                   code == LSHIFTRT, 0, in_code == COMPARE);

      break;
    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
         narrowing the object and it has a different RTL code from
         what it originally did, do this SUBREG as a force_to_mode.  */
      {
        rtx inner = SUBREG_REG (x), simplified;
        enum rtx_code subreg_code = in_code;

        /* If in_code is COMPARE, it isn't always safe to pass it through
           to the recursive make_compound_operation call.  */
        if (subreg_code == COMPARE
            && (!subreg_lowpart_p (x)
                || GET_CODE (inner) == SUBREG
                /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
                   is (const_int 0), rather than
                   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).  */
                || (GET_CODE (inner) == AND
                    && CONST_INT_P (XEXP (inner, 1))
                    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
                    && exact_log2 (UINTVAL (XEXP (inner, 1)))
                       >= GET_MODE_BITSIZE (mode))))
          subreg_code = SET;

        tem = make_compound_operation (inner, subreg_code);

        simplified
          = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
        if (simplified)
          tem = simplified;

        if (GET_CODE (tem) != GET_CODE (inner)
            && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
            && subreg_lowpart_p (x))
          {
            rtx newer
              = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);

            /* If we have something other than a SUBREG, we might have
               done an expansion, so rerun ourselves.  */
            if (GET_CODE (newer) != SUBREG)
              newer = make_compound_operation (newer, in_code);

            /* force_to_mode can expand compounds.  If it just re-expanded the
               compound, use gen_lowpart to convert to the desired mode.  */
            if (rtx_equal_p (newer, x)
                /* Likewise if it re-expanded the compound only partially.
                   This happens for SUBREG of ZERO_EXTRACT if they extract
                   the same number of bits.  */
                || (GET_CODE (newer) == SUBREG
                    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
                        || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
                    && GET_CODE (inner) == AND
                    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
              return gen_lowpart (GET_MODE (x), tem);

            return newer;
          }

        if (simplified)
          return tem;
      }
      break;

    default:
      break;
    }
  if (new_rtx)
    {
      x = gen_lowpart (mode, new_rtx);
      code = GET_CODE (x);
    }
  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                            new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
        return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
        new_rtx = make_compound_operation (XEXP (x, i), next_code);
        SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        {
          new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
          SUBST (XVECEXP (x, i, j), new_rtx);
        }

 maybe_swap:
  /* If this is a commutative operation, the changes to the operands
     may have made it noncanonical.  */
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      tem = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), tem);
    }

  return x;
}
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);
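  /* E.g. M == 0x78 (binary 1111000): POS is 3 and (M >> 3) + 1 == 0x10 is
     a power of two, so *PLEN becomes 4 and 3 is returned.  */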
  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}

/* If X refers to a register that equals REG in value, replace these
   references with REG.  */

static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
        return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
                                   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
                                        GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
        return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
                                     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
        {
          if (rtx_equal_p (get_last_value (reg), x)
              || rtx_equal_p (reg, get_last_value (x)))
            return reg;
          else
            break;
        }

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (fmt[i] == 'e')
          {
            rtx op = canon_reg_for_combine (XEXP (x, i), reg);
            if (op != XEXP (x, i))
              {
                if (!copied)
                  {
                    copied = true;
                    x = copy_rtx (x);
                  }
                XEXP (x, i) = op;
              }
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              {
                rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
                if (op != XVECEXP (x, i, j))
                  {
                    if (!copied)
                      {
                        copied = true;
                        x = copy_rtx (x);
                      }
                    XVECEXP (x, i, j) = op;
                  }
              }
          }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
        x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
                              x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */
static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
               int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT fuller_mask, nonzero;
  rtx op0, op1, temp;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;
  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
              && have_insn_for (code, mode))
             ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);
  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
  else
    fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
                   - 1);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;
  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
        return gen_int_mode (INTVAL (x) & mask, mode);
      else
        {
          x = GEN_INT (INTVAL (x) & mask);
          return gen_lowpart_common (mode, x);
        }
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);
  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && ((GET_MODE_SIZE (GET_MODE (x))
           < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
          || (0 == (mask
                    & GET_MODE_MASK (GET_MODE (x))
                    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  /* The arithmetic simplifications here only work for scalar integer
     modes.  */
  if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
    return gen_lowpart_or_truncate (mode, x);
  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
         generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
        return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);
    case AND:
      /* If this is an AND with a constant, convert it into an AND
         whose constant is the AND of that constant with MASK.  If it
         remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
        {
          x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
                                      mask & INTVAL (XEXP (x, 1)));

          /* If X is still an AND, see if it is an AND with a mask that
             is just some low-order bits.  If so, and it is MASK, we don't
             need it.  */

          if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
              && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
                  == mask))
            x = XEXP (x, 0);
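          /* E.g. forcing (and:SI X (const_int 255)) to MASK 0xff yields an
             AND with exactly MASK, which is dropped in favor of X alone.  */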
          /* If it remains an AND, try making another AND with the bits
             in the mode mask that aren't in MASK turned on.  If the
             constant in the AND is wide enough, this might make a
             cheaper constant.  */

          if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
              && GET_MODE_MASK (GET_MODE (x)) != mask
              && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
            {
              unsigned HOST_WIDE_INT cval
                = UINTVAL (XEXP (x, 1))
                  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
              rtx y;

              y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
                                       gen_int_mode (cval, GET_MODE (x)));
              if (set_src_cost (y, optimize_this_for_speed_p)
                  < set_src_cost (x, optimize_this_for_speed_p))
                x = y;
            }

          break;
        }

      goto binop;
    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
         low-order bits (as in an alignment operation) and FOO is already
         aligned to that boundary, mask C1 to that boundary as well.
         This may eliminate that PLUS and, later, the AND.  */

      {
        unsigned int width = GET_MODE_PRECISION (mode);
        unsigned HOST_WIDE_INT smask = mask;

        /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
           number, sign extend it.  */

        if (width < HOST_BITS_PER_WIDE_INT
            && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
          smask |= HOST_WIDE_INT_M1U << width;

        if (CONST_INT_P (XEXP (x, 1))
            && exact_log2 (- smask) >= 0
            && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
            && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
          return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
                                               (INTVAL (XEXP (x, 1)) & smask)),
                                mode, smask, next_select);
      }
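      /* E.g. with MASK 0xfffffff8 (an 8-byte alignment mask) and
         (plus:SI FOO (const_int 9)) where FOO's low three bits are known
         to be zero, C1 is masked to (const_int 8).  */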
      /* ... fall through ...  */

    case MULT:
      /* For PLUS, MINUS and MULT, we need any bits less significant than the
         most significant bit in MASK since carries from those bits will
         affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
         in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
          && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
        {
          x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
                                  GET_MODE (x));
          return force_to_mode (x, mode, mask, next_select);
        }

      /* Similarly, if C contains every bit in the fuller_mask, then we may
         replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
          && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
        {
          x = simplify_gen_unary (NOT, GET_MODE (x),
                                  XEXP (x, 1), GET_MODE (x));
          return force_to_mode (x, mode, mask, next_select);
        }

      mask = fuller_mask;
      goto binop;
    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
         LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
         operation which may be a bitfield extraction.  Ensure that the
         constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
          && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && ((INTVAL (XEXP (XEXP (x, 0), 1))
               + floor_log2 (INTVAL (XEXP (x, 1))))
              < GET_MODE_PRECISION (GET_MODE (x)))
          && (UINTVAL (XEXP (x, 1))
              & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
        {
          temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
                               << INTVAL (XEXP (XEXP (x, 0), 1)),
                               GET_MODE (x));
          temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
                                      XEXP (XEXP (x, 0), 0), temp);
          x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
                                   XEXP (XEXP (x, 0), 1));
          return force_to_mode (x, mode, mask, next_select);
        }
    binop:
      /* For most binary operations, just propagate into the operation and
         change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
         operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
          && GET_CODE (op1) == TRUNCATE)
        {
          op0 = XEXP (op0, 0);
          op1 = XEXP (op1, 0);
        }

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        x = simplify_gen_binary (code, op_mode, op0, op1);
      break;
    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
         However, we cannot do anything with shifts where we cannot
         guarantee that the counts are smaller than the size of the mode
         because such a count will have a different meaning in a
         wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
             && INTVAL (XEXP (x, 1)) >= 0
             && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
          && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
                && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
                    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
        break;

      /* If the shift count is a constant and we can do arithmetic in
         the mode of the shift, refine which bits we need.  Otherwise, use the
         conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
          && HWI_COMPUTABLE_MODE_P (op_mode))
        mask >>= INTVAL (XEXP (x, 1));
      else
        mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
                                     force_to_mode (XEXP (x, 0), op_mode,
                                                    mask, next_select));

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
        x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
      break;
    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
         this shift constant is valid for the host, and we can do arithmetic
         in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && HWI_COMPUTABLE_MODE_P (op_mode))
        {
          rtx inner = XEXP (x, 0);
          unsigned HOST_WIDE_INT inner_mask;

          /* Select the mask of the bits we need for the shift operand.  */
          inner_mask = mask << INTVAL (XEXP (x, 1));

          /* We can only change the mode of the shift if we can do arithmetic
             in the mode of the shift and INNER_MASK is no wider than the
             width of X's mode.  */
          if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
            op_mode = GET_MODE (x);

          inner = force_to_mode (inner, op_mode, inner_mask, next_select);

          if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
            x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
        }

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
         shift and AND produces only copies of the sign bit (C2 is one less
         than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
          && CONST_INT_P (XEXP (x, 1))
          /* The shift puts one of the sign bit copies in the least significant
             bit.  */
          && ((INTVAL (XEXP (x, 1))
               + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
              >= GET_MODE_PRECISION (GET_MODE (x)))
          && exact_log2 (mask + 1) >= 0
          /* Number of bits left after the shift must be more than the mask
             needs.  */
          && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
              <= GET_MODE_PRECISION (GET_MODE (x)))
          /* Must be more sign bit copies than the mask needs.  */
          && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
              >= exact_log2 (mask + 1)))
        x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
                                 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
                                          - exact_log2 (mask + 1)));

      break;
    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
         all, even if it has a variable count.  */
      if (val_signbit_p (GET_MODE (x), mask))
        return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
         that are not copies of the sign bit.  We then have two cases:  If
         MASK only includes those bits, this can be a logical shift, which may
         allow simplifications.  If MASK is a single-bit field not within
         those bits, we are requesting a copy of the sign bit and hence can
         shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int i;

          /* If the considered data is wider than HOST_WIDE_INT, we can't
             represent a mask for all its bits in a single scalar.
             But we only care about the lower bits, so calculate these.  */

          if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
            {
              nonzero = ~(unsigned HOST_WIDE_INT) 0;

              /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
                 is the number of bits a full-width mask would have set.
                 We need only shift if these are fewer than nonzero can
                 hold.  If not, we must keep all bits set in nonzero.  */

              if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
                  < HOST_BITS_PER_WIDE_INT)
                nonzero >>= INTVAL (XEXP (x, 1))
                            + HOST_BITS_PER_WIDE_INT
                            - GET_MODE_PRECISION (GET_MODE (x));
            }
          else
            {
              nonzero = GET_MODE_MASK (GET_MODE (x));
              nonzero >>= INTVAL (XEXP (x, 1));
            }

          if ((mask & ~nonzero) == 0)
            {
              x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
                                        XEXP (x, 0), INTVAL (XEXP (x, 1)));
              if (GET_CODE (x) != ASHIFTRT)
                return force_to_mode (x, mode, mask, next_select);
            }
          else if ((i = exact_log2 (mask)) >= 0)
            {
              x = simplify_shift_const
                  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
                   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);

              if (GET_CODE (x) != ASHIFTRT)
                return force_to_mode (x, mode, mask, next_select);
            }
        }

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
         even if the shift count isn't a constant.  */
      if (mask == 1)
        x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
                                 XEXP (x, 0), XEXP (x, 1));

      /* If this is a zero- or sign-extension operation that just affects bits
         we don't care about, remove it.  Be sure the call above returned
         something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
          && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && (INTVAL (XEXP (x, 1))
              <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
          && GET_CODE (XEXP (x, 0)) == ASHIFT
          && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
        return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
                              next_select);

      break;
    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
         in the mode of X, compute where the bits we care about are.
         Otherwise, we can't do anything.  Don't change the mode of
         the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0)
        {
          temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
                                            GET_MODE (x),
                                            gen_int_mode (mask, GET_MODE (x)),
                                            XEXP (x, 1));
          if (temp && CONST_INT_P (temp))
            x = simplify_gen_binary (code, GET_MODE (x),
                                     force_to_mode (XEXP (x, 0), GET_MODE (x),
                                                    INTVAL (temp), next_select),
                                     XEXP (x, 1));
        }
      break;
    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
         won't change the low-order bit.  */
      if (mask == 1)
        return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
         MASK since carries from those bits will affect the bits we are
         interested in.  */
      mask = fuller_mask;
      goto unop;
    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
         same as the XOR case above.  Ensure that the constant we form is not
         wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
          && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
              < GET_MODE_PRECISION (GET_MODE (x)))
          && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
        {
          temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
                               GET_MODE (x));
          temp = simplify_gen_binary (XOR, GET_MODE (x),
                                      XEXP (XEXP (x, 0), 0), temp);
          x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
                                   temp, XEXP (XEXP (x, 0), 1));

          return force_to_mode (x, mode, mask, next_select);
        }
      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
         use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
                                     force_to_mode (XEXP (x, 0), mode, mask,
                                                    next_select));
      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
        x = simplify_gen_unary (code, op_mode, op0, op_mode);
      break;
    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
         in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
         which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
          && XEXP (x, 1) == const0_rtx
          && GET_MODE (XEXP (x, 0)) == mode
          && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
          && (nonzero_bits (XEXP (x, 0), mode)
              == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
        return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;
    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
         written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (GET_MODE (x),
                                     force_to_mode (XEXP (x, 1), mode,
                                                    mask, next_select));
      op1 = gen_lowpart_or_truncate (GET_MODE (x),
                                     force_to_mode (XEXP (x, 2), mode,
                                                    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
        x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
                                  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
                                  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }
  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
           && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
                                    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;
  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
      cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);

      if ((cond0 != 0 || cond1 != 0)
          && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
        {
          /* If if_then_else_cond returned zero, then true/false are the
             same rtl.  We must copy one of them to prevent invalid rtl
             sharing.  */
          if (cond0 == 0)
            true0 = copy_rtx (true0);
          else if (cond1 == 0)
            true1 = copy_rtx (true1);

          if (COMPARISON_P (x))
            {
              *ptrue = simplify_gen_relational (code, mode, VOIDmode,
                                                true0, true1);
              *pfalse = simplify_gen_relational (code, mode, VOIDmode,
                                                 false0, false1);
            }
          else
            {
              *ptrue = simplify_gen_binary (code, mode, true0, true1);
              *pfalse = simplify_gen_binary (code, mode, false0, false1);
            }

          return cond0 ? cond0 : cond1;
        }
      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
         operands is zero when the other is nonzero, and vice-versa,
         and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
          && (code == PLUS || code == IOR || code == XOR || code == MINUS
              || code == UMAX)
          && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
        {
          rtx op0 = XEXP (XEXP (x, 0), 1);
          rtx op1 = XEXP (XEXP (x, 1), 1);

          cond0 = XEXP (XEXP (x, 0), 0);
          cond1 = XEXP (XEXP (x, 1), 0);

          if (COMPARISON_P (cond0)
              && COMPARISON_P (cond1)
              && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
                   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
                   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
                  || ((swap_condition (GET_CODE (cond0))
                       == reversed_comparison_code (cond1, NULL))
                      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
                      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
              && ! side_effects_p (x))
            {
              *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
              *pfalse = simplify_gen_binary (MULT, mode,
                                             (code == MINUS
                                              ? simplify_gen_unary (NEG, mode,
                                                                    op1, mode)
                                              : op1),
                                             const_true_rtx);
              return cond0;
            }
        }
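      /* E.g. with STORE_FLAG_VALUE == 1,
         (plus (mult (eq A B) (const_int 5)) (mult (ne A B) (const_int 7)))
         returns (eq A B) with *PTRUE == (const_int 5) and
         *PFALSE == (const_int 7).  */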
      /* Similarly for MULT, AND and UMIN, except that for these the result
         is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
          && (code == MULT || code == AND || code == UMIN)
          && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
        {
          cond0 = XEXP (XEXP (x, 0), 0);
          cond1 = XEXP (XEXP (x, 1), 0);

          if (COMPARISON_P (cond0)
              && COMPARISON_P (cond1)
              && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
                   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
                   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
                  || ((swap_condition (GET_CODE (cond0))
                       == reversed_comparison_code (cond1, NULL))
                      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
                      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
              && ! side_effects_p (x))
            {
              *ptrue = *pfalse = const0_rtx;
              return cond0;
            }
        }
    }
  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
         canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
        return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
        {
          *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
          return XEXP (cond0, 0);
        }
    }
  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
           && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
                                               &true0, &false0)))
    {
      true0 = simplify_gen_subreg (mode, true0,
                                   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
                                    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
        {
          *ptrue = true0;
          *pfalse = false0;
          return cond0;
        }
    }
  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
           || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
           || (mode != VOIDmode
               && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
           && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known not to be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
        return XEXP (x, 0);
      case LT:  case LE:
        return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
                                   XEXP (x, 0),
                                   GET_MODE (XEXP (x, 0)));
      default:
        break;
      }
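  /* E.g. given that (ge REG (const_int 0)) holds, (abs REG) is REG itself;
     given (lt REG (const_int 0)), it is (neg REG).  */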
  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
        {
          std::swap (val, reg);
          cond = swap_condition (cond);
        }

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
        {
          if (COMPARISON_P (x))
            {
              if (comparison_dominates_p (cond, code))
                return const_true_rtx;

              code = reversed_comparison_code (x, NULL);
              if (code != UNKNOWN
                  && comparison_dominates_p (cond, code))
                return const0_rtx;
              else
                return x;
            }
          else if (code == SMAX || code == SMIN
                   || code == UMIN || code == UMAX)
            {
              int unsignedp = (code == UMIN || code == UMAX);

              /* Do not reverse the condition when it is NE or EQ.
                 This is because we cannot conclude anything about
                 the value of 'SMAX (x, y)' when x is not equal to y,
                 but we can when x equals y.  */
              if ((code == SMAX || code == UMAX)
                  && ! (cond == EQ || cond == NE))
                cond = reverse_condition (cond);

              switch (cond)
                {
                case GE:   case GT:
                  return unsignedp ? x : XEXP (x, 1);
                case LE:   case LT:
                  return unsignedp ? x : XEXP (x, 0);
                case GEU:  case GTU:
                  return unsignedp ? XEXP (x, 1) : x;
                case LEU:  case LTU:
                  return unsignedp ? XEXP (x, 0) : x;
                default:
                  break;
                }
            }
        }
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
        {
          /* We must simplify subreg here, before we lose track of the
             original inner_mode.  */
          new_rtx = simplify_subreg (GET_MODE (x), r,
                                     inner_mode, SUBREG_BYTE (x));
          if (new_rtx)
            return new_rtx;
          else
            SUBST (SUBREG_REG (x), r);
        }

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
        {
          /* We must simplify the zero_extend here, before we lose
             track of the original inner_mode.  */
          new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                              r, inner_mode);
          if (new_rtx)
            return new_rtx;
          else
            SUBST (XEXP (x, 0), r);
        }

      return x;
    }
= GET_RTX_FORMAT (code
);
9207 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
9210 SUBST (XEXP (x
, i
), known_cond (XEXP (x
, i
), cond
, reg
, val
));
9211 else if (fmt
[i
] == 'E')
9212 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
9213 SUBST (XVECEXP (x
, i
, j
), known_cond (XVECEXP (x
, i
, j
),
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
        return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
        return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
        x = adjust_address_nv (x, GET_MODE (y),
                               -subreg_lowpart_offset (GET_MODE (x),
                                                       GET_MODE (y)));
      else
        x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;
  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
                      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
                      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */

  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;
  machine_mode mode;
  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }
9305 if (GET_CODE (src
) == AND
&& GET_CODE (XEXP (src
, 0)) == SUBREG
9306 && subreg_lowpart_p (XEXP (src
, 0))
9307 && (GET_MODE_SIZE (GET_MODE (XEXP (src
, 0)))
9308 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src
, 0)))))
9309 && GET_CODE (SUBREG_REG (XEXP (src
, 0))) == ROTATE
9310 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src
, 0)), 0))
9311 && INTVAL (XEXP (SUBREG_REG (XEXP (src
, 0)), 0)) == -2
9312 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9314 assign
= make_extraction (VOIDmode
, dest
, 0,
9315 XEXP (SUBREG_REG (XEXP (src
, 0)), 1),
9318 return gen_rtx_SET (assign
, const0_rtx
);
9322 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9324 if (GET_CODE (src
) == IOR
&& GET_CODE (XEXP (src
, 0)) == ASHIFT
9325 && XEXP (XEXP (src
, 0), 0) == const1_rtx
9326 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9328 assign
= make_extraction (VOIDmode
, dest
, 0, XEXP (XEXP (src
, 0), 1),
9331 return gen_rtx_SET (assign
, const1_rtx
);
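/* Illustrative sketch (added for exposition; not part of the pass).  The
   helper below is hypothetical and merely checks, for 32-bit values, the
   two rewrites above: AND with (rotate -2 POS) clears exactly bit POS,
   and IOR with (ashift 1 POS) sets exactly bit POS.  */
#if 0
#include <assert.h>

static void
one_bit_field_identity_check (void)
{
  unsigned int x = 0xdeadbeefu;
  for (unsigned int pos = 0; pos < 32; pos++)
    {
      unsigned int m = 0xfffffffeu;	/* (const_int -2) in 32 bits.  */
      /* (rotate -2 POS): all ones except bit POS.  */
      unsigned int rot = pos ? (m << pos) | (m >> (32 - pos)) : m;

      /* Clearing a one-bit field: (and (rotate -2 POS) X).  */
      assert ((x & rot) == (x & ~(1u << pos)));
      /* Setting a one-bit field: (ior (ashift 1 POS) X).  */
      assert ((x | (1u << pos)) == ((x & ~(1u << pos)) | (1u << pos)));
    }
}
#endif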
  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = ~(unsigned HOST_WIDE_INT) 0;
      else
	ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  mode = GET_MODE (src);
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }
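/* Illustrative sketch (added for exposition; not part of the pass), using
   hypothetical mask values: ZE_MASK selects the WIDTH bits the ZERO_EXTRACT
   actually stores, so an AND mask that covers all of them is redundant and
   a partially overlapping mask can be narrowed to AND_MASK & ZE_MASK.  */
#if 0
#include <assert.h>

static void
ze_mask_overlap_check (void)
{
  unsigned int width = 8;			/* hypothetical field width */
  unsigned int ze_mask = (1u << width) - 1;	/* 0xff */

  /* Complete overlap: the AND contributes nothing and can be removed.  */
  unsigned int and_mask = 0x3ff;
  assert ((and_mask & ze_mask) == ze_mask);

  /* Partial overlap: bits outside the field are dead, so the AND can be
     reduced to and_mask & ze_mask == 0xf0.  */
  and_mask = 0x1f0;
  assert ((and_mask & ze_mask) != and_mask);
}
#endif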
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */
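/* Illustrative sketch (added for exposition; not part of the pass) of what
   the get_pos_from_mask call further below computes.  For C1 with ones
   everywhere except one contiguous hole, ~C1 has a single contiguous group
   of ones; POS is the number of its lowest bit and LEN its width.  A
   hypothetical mini version for 32-bit masks:  */
#if 0
static int
pos_from_mask_sketch (unsigned int mask, unsigned int *plen)
{
  if (mask == 0)
    return -1;

  int pos = __builtin_ctz (mask);	/* lowest set bit */
  unsigned int run = mask >> pos;
  if ((run & (run + 1)) != 0)		/* the ones are not contiguous */
    return -1;

  *plen = __builtin_popcount (mask);
  return pos;				/* e.g. mask 0x0000ff00: pos 8, len 8 */
}
#endif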
  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
  if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
      || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  mode = (GET_CODE (assign) == STRICT_LOW_PART
	  ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     GET_MODE (src),
						     other, pos),
			       dest);
  src = force_to_mode (src, mode,
		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
		       ? ~(unsigned HOST_WIDE_INT) 0
		       : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
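/* Illustrative sketch (added for exposition; not part of the pass): an
   exhaustive check, over 4-bit values, of a plain distribution such as
   ((a & c) | (b & c)) == ((a | b) & c) and of the one exception handled
   above, (a | c) ^ (b | c) == (a ^ b) & ~c.  */
#if 0
#include <assert.h>

static void
distributive_law_check (void)
{
  for (unsigned int a = 0; a < 16; a++)
    for (unsigned int b = 0; b < 16; b++)
      for (unsigned int c = 0; c < 16; c++)
	{
	  assert (((a & c) | (b & c)) == ((a | b) & c));
	  assert ((((a | c) ^ (b | c)) & 15) == (((a ^ b) & ~c) & 15));
	}
}
#endif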
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, optimize_this_for_speed_p)
	  < set_src_cost (x, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
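/* Illustrative sketch (added for exposition; not part of the pass): the
   special case above rests on the bitwise identity
   (b ^ c) & ~a == (a | b) ^ (a | c), checked here exhaustively over
   4-bit values.  */
#if 0
#include <assert.h>

static void
xor_not_distribute_check (void)
{
  for (unsigned int a = 0; a < 16; a++)
    for (unsigned int b = 0; b < 16; b++)
      for (unsigned int c = 0; c < 16; c++)
	assert ((((b ^ c) & ~a) & 15) == (((a | b) ^ (a | c)) & 15));
}
#endif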
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (machine_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
	(mode,
	 apply_distributive_law
	 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
			       simplify_and_const_int (NULL_RTX,
						       GET_MODE (varop),
						       XEXP (varop, 0),
						       constop),
			       simplify_and_const_int (NULL_RTX,
						       GET_MODE (varop),
						       XEXP (varop, 1),
						       constop))));

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && exact_log2 (constop + 1) >= 0)
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
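/* Illustrative sketch (added for exposition; not part of the pass): the
   NEG-to-ASHIFT rewrite above.  If B is known to be 0 or 1, then
   (-B) & (1 << i) equals B << i, because -1 has every bit set and
   -0 has none.  */
#if 0
#include <assert.h>

static void
neg_and_power_of_two_check (void)
{
  for (unsigned int b = 0; b <= 1; b++)
    for (unsigned int i = 0; i < 32; i++)
      assert ((-b & (1u << i)) == (b << i));
}
#endif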
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);

  return x;
}
/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
			      const_rtx known_x ATTRIBUTE_UNUSED,
			      machine_mode known_mode ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      unsigned HOST_WIDE_INT mask = rsp->last_set_nonzero_bits;

      if (GET_MODE_PRECISION (rsp->last_set_mode) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (rsp->last_set_mode);

      *nonzero &= mask;
      return NULL;
    }

  tem = get_last_value (x);

  if (tem)
    {
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      tem = sign_extend_short_imm (tem, GET_MODE (x),
				   GET_MODE_PRECISION (mode));
#endif
      return tem;
    }
  else if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));

      *nonzero &= mask;
    }

  return NULL;
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
				     const_rtx known_x ATTRIBUTE_UNUSED,
				     machine_mode known_mode ATTRIBUTE_UNUSED,
				     unsigned int known_ret ATTRIBUTE_UNUSED,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  return (unsignedp
	  ? (HWI_COMPUTABLE_MODE_P (mode)
	     ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
			       - floor_log2 (nonzero_bits (x, mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
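/* Illustrative sketch (added for exposition; not part of the pass) of the
   "spare bits" guarantee stated above: two unsigned 32-bit values that
   each have at least one high-order zero bit sum to at most 0xfffffffe,
   so the addition cannot wrap.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
spare_bit_no_overflow_check (void)
{
  uint32_t x = 0x7fffffffu;	/* top bit clear: one spare bit */
  uint32_t y = 0x7fffffffu;	/* likewise */
  uint64_t wide = (uint64_t) x + y;
  assert (wide <= UINT32_MAX);	/* the 32-bit addition did not overflow */
}
#endif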
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    ;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b  */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b  */
	  ;
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b  */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b  */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b  */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b  */
	  *pcomp_p = 1;
	break;

      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  *pop0 = op0;
  return 1;
}
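/* Illustrative sketch (added for exposition; not part of the pass): the six
   merge identities in the switch above, checked exhaustively over 4-bit
   values, with A the inner operand and B the shared constant.  */
#if 0
#include <assert.h>

static void
merge_outer_ops_identity_check (void)
{
  for (unsigned int a = 0; a < 16; a++)
    for (unsigned int b = 0; b < 16; b++)
      {
	assert (((a & b) | b) == b);				/* IOR of AND */
	assert (((a ^ b) | b) == (a | b));			/* IOR of XOR */
	assert ((((a & b) ^ b) & 15) == ((~a & b) & 15));	/* XOR of AND */
	assert ((((a | b) ^ b) & 15) == ((a & ~b) & 15));	/* XOR of IOR */
	assert (((a | b) & b) == b);				/* AND of IOR */
	assert ((((a ^ b) & b) & 15) == ((~a & b) & 15));	/* AND of XOR */
      }
}
#endif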
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      machine_mode orig_mode, machine_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  if (orig_mode == mode)
    return mode;
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
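/* Illustrative sketch (added for exposition; not part of the pass): why an
   arithmetic right shift can be widened when enough sign-bit copies exist.
   A value sign-extended from 8 to 16 bits has at least 9 sign copies in the
   wide mode, so shifting in 16 bits agrees with shifting in 8 bits on the
   low byte.  (Relies on GCC defining >> of a negative value as an
   arithmetic shift.)  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
widen_ashiftrt_check (void)
{
  for (int v = -128; v < 128; v++)
    for (int n = 0; n < 8; n++)
      {
	int16_t wide = (int16_t) v;	/* sign-extended operand */
	int8_t narrow = (int8_t) v;
	assert ((int8_t) (wide >> n) == (int8_t) (narrow >> n));
      }
}
#endif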
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode, tmode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure to truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_PRECISION (result_mode);
	  code = ROTATE;
	  if (VECTOR_MODE_P (result_mode))
	    count = bitsize / GET_MODE_NUNITS (result_mode) - count;
	  else
	    count = bitsize - count;
	}

      shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
					 mode, outer_op, outer_const);

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      /* An arithmetic right shift of a quantity known to be -1 or 0
	 is a no-op.  */
      if (code == ASHIFTRT
	  && (num_sign_bit_copies (varop, shift_mode)
	      == GET_MODE_PRECISION (shift_mode)))
	{
	  count = 0;
	  break;
	}

      /* If we are doing an arithmetic right shift and discarding all but
	 the sign bit copies, this is equivalent to doing a shift by the
	 bitsize minus one.  Convert it into that shift because it will often
	 allow other simplifications.  */

      if (code == ASHIFTRT
	  && (count + num_sign_bit_copies (varop, shift_mode)
	      >= GET_MODE_PRECISION (shift_mode)))
	count = GET_MODE_PRECISION (shift_mode) - 1;

      /* We simplify the tests below and elsewhere by converting
	 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	 `make_compound_operation' will convert it to an ASHIFTRT for
	 those machines (such as VAX) that don't have an LSHIFTRT.  */
      if (code == ASHIFTRT
	  && val_signbit_known_clear_p (shift_mode,
					nonzero_bits (varop, shift_mode)))
	code = LSHIFTRT;

      if (((code == LSHIFTRT
	    && HWI_COMPUTABLE_MODE_P (shift_mode)
	    && !(nonzero_bits (varop, shift_mode) >> count))
	   || (code == ASHIFT
	       && HWI_COMPUTABLE_MODE_P (shift_mode)
	       && !((nonzero_bits (varop, shift_mode) << count)
		    & GET_MODE_MASK (shift_mode))))
	  && !side_effects_p (varop))
	varop = const0_rtx;
      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;

	case MEM:
	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
					 MODE_INT, 1)) != BLKmode)
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;

	case SUBREG:
	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
		  > GET_MODE_SIZE (GET_MODE (varop)))
	      && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words
	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
	      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
		mode = GET_MODE (varop);
	      continue;
	    }
	  break;

	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
	      && (GET_MODE_BITSIZE (result_mode)
		  >= GET_MODE_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* ... fall through ...  */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
	      && HWI_COMPUTABLE_MODE_P (result_mode)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && !VECTOR_MODE_P (result_mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (result_mode)
			       - GET_MODE_PRECISION (GET_MODE (varop))))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (mode)
			 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
						varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && GET_MODE (varop) == shift_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (GET_MODE (varop) != result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
		  || (GET_MODE (varop) != result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
				       result_mode);

	      mask_rtx
		= simplify_const_binary_operation (code, result_mode, mask_rtx,
						   GEN_INT (count));

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used.  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }
	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      rtx new_rtx = simplify_const_binary_operation (code, mode,
							     XEXP (varop, 0),
							     GEN_INT (count));
	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;

	case NOT:
	  if (VECTOR_MODE_P (mode))
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;

	case IOR:
	case AND:
	case XOR:
	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_mode with shift_mode
		 wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && result_mode != shift_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode))
	      && (new_rtx = simplify_const_binary_operation
		  (code, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && result_mode != shift_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode)))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;

	case EQ:
	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
	     says that the sign bit can be tested, FOO has mode MODE, C is
	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
	     that may be nonzero.  */
	  if (code == LSHIFTRT
	      && XEXP (varop, 1) == const0_rtx
	      && GET_MODE (XEXP (varop, 0)) == result_mode
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && HWI_COMPUTABLE_MODE_P (result_mode)
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;

	case NEG:
	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
				  &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), result_mode)
		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (result_mode)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    >> count)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    & nonzero_bits (XEXP (varop, 1),
					    result_mode)))
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }

	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;

	case MINUS:
	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }
	  break;

	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);

	      varop_inner
		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
				    XEXP (varop_inner, 0),
				    GEN_INT
				    (count + INTVAL (XEXP (varop_inner, 1))));
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
				     outer_op, outer_const);

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    x = simplify_and_const_int (NULL_RTX, shift_mode, x,
				GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
      else
	x = simplify_gen_binary (outer_op, result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
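/* Illustrative sketch (added for exposition; not part of the pass): the
   nested-shift rule used above.  Two logical right shifts merge by adding
   their counts, provided the total stays below the operand width.  */
#if 0
#include <assert.h>

static void
nested_shift_merge_check (void)
{
  unsigned int x = 0xdeadbeefu;
  for (unsigned int c1 = 0; c1 < 16; c1++)
    for (unsigned int c2 = 0; c1 + c2 < 32; c2++)
      assert (((x >> c1) >> c2) == (x >> (c1 + c2)));
}
#endif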
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */

static bool
change_zero_ext (rtx *src)
{
  bool changed = false;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      machine_mode mode = GET_MODE (x);
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && GET_MODE (XEXP (x, 0)) == mode)
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (mode) - size - start;

	  x = gen_rtx_LSHIFTRT (mode, XEXP (x, 0), GEN_INT (start));
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = SUBREG_REG (XEXP (x, 0));
	}
      else
	continue;

      unsigned HOST_WIDE_INT mask = 1;
      mask <<= size;
      mask--;

      x = gen_rtx_AND (mode, x, GEN_INT (mask));

      SUBST (**iter, x);
      changed = true;
    }

  return changed;
}
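/* Illustrative sketch (added for exposition; not part of the pass): the
   rewrite performed above.  With little-endian bit numbering,
   (zero_extract X SIZE START) extracts the same bits as
   (and (lshiftrt X START) MASK) with MASK = (1 << SIZE) - 1.  */
#if 0
static unsigned int
zero_extract_sketch (unsigned int x, int size, int start)
{
  unsigned int mask = size < 32 ? (1u << size) - 1 : ~0u;
  return (x >> start) & mask;	/* e.g. (0xabcd, 8, 4) yields 0xbc */
}
#endif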
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = PATTERN (insn);
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (&SET_SRC (pat));
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (&SET_SRC (set));
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
11135 /* Like gen_lowpart_general but for use by combine. In combine it
11136 is not possible to create any new pseudoregs. However, it is
11137 safe to create invalid memory addresses, because combine will
11138 try to recognize them and all they will do is make the combine
11141 If for some reason this cannot do its job, an rtx
11142 (clobber (const_int 0)) is returned.
11143 An insn containing that will not be recognized. */
static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (isize < osize)
	return gen_rtx_SUBREG (omode, x, 0);

      if (WORDS_BIG_ENDIAN)
	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
	 unchanged.  */
      if (BYTES_BIG_ENDIAN)
	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      int offset = 0;
      rtx res;

      offset = subreg_lowpart_offset (omode, imode);
      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode);
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = simplify_gen_subreg (omode, x, imode, offset);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
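
/* For instance (illustrative, hypothetical operands): asking for the
   QImode lowpart of a four-byte pseudo,

     gen_lowpart_for_combine (QImode, (reg:SI 100))

   yields (subreg:QI (reg:SI 100) 0) on a little-endian target, while a
   volatile MEM in the same position takes the fail path and produces
   (clobber (const_int 0)), so recognition simply fails.  */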
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  unsigned int mode_width = GET_MODE_PRECISION (mode);
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && mode_width - 1 < HOST_BITS_PER_WIDE_INT
      && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0
      && (nonzero_bits (op0, mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && num_sign_bit_copies (op0, mode) == mode_width)
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
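
/* Two illustrative canonicalizations (the SImode operand X is
   hypothetical):

     (gtu X (const_int 0))           becomes  (ne X (const_int 0))
     (ltu X (const_int 0x80000000))  becomes  (ge X (const_int 0))

   the second because an unsigned value below the sign bit is exactly a
   value whose sign bit is clear.  */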
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  machine_mode mode, tmode;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
	      == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (GET_MODE (op0))
		  - (GET_MODE_PRECISION
		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
#endif
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
		  <= HOST_BITS_PER_WIDE_INT)
	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					      GET_MODE (SUBREG_REG (inner_op0)))))
	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					      GET_MODE (SUBREG_REG (inner_op1))))))
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    for (tmode = GET_CLASS_NARROWEST_MODE
		 (GET_MODE_CLASS (GET_MODE (op0)));
		 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart (tmode, inner_op0);
		  op1 = gen_lowpart (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}

      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }
  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode mode = GET_MODE (op0);
      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (mode) != MODE_INT
	  && ! (mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
	op0 = force_to_mode (op0, mode,
			     (unsigned HOST_WIDE_INT) 1
			     << (GET_MODE_PRECISION (mode) - 1),
			     0);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* ... fall through ...  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    ((unsigned HOST_WIDE_INT) 1
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturbate A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && ((0 == (nonzero_bits (a, inner_mode)
			      & ~GET_MODE_MASK (mode))
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }

	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (subreg_lowpart_p (op0)
	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
	    /* Fall through */ ;
	  else
	    break;

	  /* ... fall through ...  */

	case ZERO_EXTEND:
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;
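
	  /* A concrete instance of the PLUS rule above (constants
	     hypothetical):

	       (eq (plus:SI X (const_int 4)) (const_int 7))

	     becomes (eq X (const_int 3)), folding the constant to the
	     other side; this is only done for equality, where overflow
	     cannot change the answer.  */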
	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case EQ:  case NE:
	case UNEQ:  case LTGT:
	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
	case UNORDERED: case ORDERED:
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */

	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;
	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }

	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	     -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
	      && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
		  || (REG_P (XEXP (op0, 0))
		      && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
	    {
	      op0 = gen_lowpart (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG)
	    {
	      unsigned HOST_WIDE_INT c1;
	      tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if (SCALAR_INT_MODE_P (tmode)
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.  */
		  && (0
#ifdef WORD_REGISTER_OPERATIONS
		      || (mode_width > GET_MODE_PRECISION (tmode)
			  && mode_width <= BITS_PER_WORD)
#endif
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && CONST_INT_P (XEXP (op0, 1))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == (unsigned HOST_WIDE_INT) 1
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;

	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    ((unsigned HOST_WIDE_INT) 1
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;
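
	  /* Example for the first ASHIFT rule above (operands
	     hypothetical): when the three high-order bits of X are known
	     to be zero,

	       (eq (ashift:SI X (const_int 3)) (const_int 16))

	     is rewritten as (eq X (const_int 2)); the low-order three
	     bits of 16 are zero, as required.  */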
	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
					 MODE_INT, 1)) != BLKmode
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
					 MODE_INT, 1)) != BLKmode
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* ... fall through ...  */

	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & (((unsigned HOST_WIDE_INT) 1
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op <<= INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
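
  /* The last LSHIFTRT rule above gives, for example (operand
     hypothetical),

       (ne (lshiftrt:SI X (const_int 31)) (const_int 0))
	 ->  (lt:SI X (const_int 0))

     because once everything but the sign bit is shifted out, being
     nonzero is the same as the sign bit being set.  */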
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outmost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
	UNKNOWN.  In that case we know those bits are zeros or ones.  We must
	also be sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (GET_MODE (op0), op1);
	    }
	}
      else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
		<= HOST_BITS_PER_WIDE_INT)
	       && (nonzero_bits (SUBREG_REG (op0),
				 GET_MODE (SUBREG_REG (op0)))
		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	{
	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);

	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }

  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  mode = GET_MODE (op0);
  if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    for (tmode = GET_MODE_WIDER_MODE (mode);
	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
	 tmode = GET_MODE_WIDER_MODE (tmode))
      if (have_insn_for (COMPARE, tmode))
	{
	  int zero_extended;

	  /* If this is a test for negative, we can make an explicit
	     test of the sign bit.  Test this first so we can use
	     a paradoxical subreg to extend OP0.  */

	  if (op1 == const0_rtx && (code == LT || code == GE)
	      && HWI_COMPUTABLE_MODE_P (mode))
	    {
	      unsigned HOST_WIDE_INT sign
		= (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1);
	      op0 = simplify_gen_binary (AND, tmode,
					 gen_lowpart (tmode, op0),
					 gen_int_mode (sign, tmode));
	      code = (code == LT) ? NE : EQ;
	      break;
	    }

	  /* If the only nonzero bits in OP0 and OP1 are those in the
	     narrower mode and this is an equality or unsigned comparison,
	     we can use the wider mode.  Similarly for sign-extended
	     values, in which case it is true for all comparisons.  */
	  zero_extended = ((code == EQ || code == NE
			    || code == GEU || code == GTU
			    || code == LEU || code == LTU)
			   && (nonzero_bits (op0, tmode)
			       & ~GET_MODE_MASK (mode)) == 0
			   && ((CONST_INT_P (op1)
				|| (nonzero_bits (op1, tmode)
				    & ~GET_MODE_MASK (mode)) == 0)));

	  if (zero_extended
	      || ((num_sign_bit_copies (op0, tmode)
		   > (unsigned int) (GET_MODE_PRECISION (tmode)
				     - GET_MODE_PRECISION (mode)))
		  && (num_sign_bit_copies (op1, tmode)
		      > (unsigned int) (GET_MODE_PRECISION (tmode)
					- GET_MODE_PRECISION (mode)))))
	    {
	      /* If OP0 is an AND and we don't have an AND in MODE either,
		 make a new AND in the proper mode.  */
	      if (GET_CODE (op0) == AND
		  && !have_insn_for (AND, mode))
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode,
							XEXP (op0, 0)),
					   gen_lowpart (tmode,
							XEXP (op0, 1)));

	      if (zero_extended)
		{
		  op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
		  op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
		}
	      else
		{
		  op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
		  op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
		}
	      break;
	    }
	}

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
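
/* For instance, count_rtxs counts (plus (reg 100) (reg 100)) with a
   shared operand rtx as 1 + 2 * count_rtxs (reg), i.e. 3: the size the
   expression would reach once the shared operand had been substituted
   in both places.  */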
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpression we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */

      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle SUBREG in
	 some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      gen_lowpart (GET_MODE (dest),
					   SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}

/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}

/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
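
/* Example (hypothetical register): if (reg:DI 100) was last recorded
   as accessed through (subreg:SI (reg:DI 100) 0) in this EBB, then
   reg_truncated_to_mode (SImode, x) answers true, and combine may refer
   to the SImode value with a plain SUBREG even on targets where
   truncation is not a no-op.  */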
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
   to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
	return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || (GET_MODE_SIZE (truncated_mode)
	  < GET_MODE_SIZE (rsp->truncated_to_mode)))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}

/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}

/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpression we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
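
/* For example (purely illustrative numbers): if reg 101 was last set by

	(set (reg:SI 101) (const_int 34))

   then get_last_value on (subreg:HI (reg:SI 101) 0) first recovers
   (const_int 34) for the SUBREG_REG and then narrows it with gen_lowpart,
   so the caller sees the HImode constant.  A paradoxical SUBREG would
   instead return 0, since the extra bits are unpredictable.  */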
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
	 because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
	return 1;
#endif
      for (; regno < endreg; regno++)
	{
	  reg_stat_type *rsp = &reg_stat[regno];
	  if (rsp->last_set
	      && rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) > from_luid)
	    return 1;
	}
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && use_crosses_set_p (XEXP (x, i), from_luid))
	return 1;
    }

  return 0;
}
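
/* A sketch of the hazard this guards against (insn numbers invented):

	(insn 10 (set (reg:SI 99) (const_int 1)))
	(insn 11 (set (reg:SI 100) (reg:SI 99)))
	(insn 12 (set (reg:SI 99) (const_int 2)))
	(insn 13 (use (reg:SI 100)))

   Substituting (reg:SI 99) for (reg:SI 100) in insn 13 would read the
   value stored by insn 12, not insn 10.  use_crosses_set_p detects that
   reg 99 is set in an insn with a LUID greater than insn 11's and so
   vetoes the combination.  */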
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
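
/* Example of the backward scan (invented insns): asked whether reg 3 is
   dead at insn 20 in

	(insn 18 (set (reg:SI 3) ...))
	(insn 19 (... (reg:SI 3) ...) (expr_list:REG_DEAD (reg:SI 3) ...))
	(insn 20 ...)

   the walk from insn 20 hits the REG_DEAD note on insn 19 first and
   answers "dead"; had it reached insn 18's SET first (via note_stores
   setting reg_dead_flag to -1), the answer would have been "live".  */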
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || regno == HARD_FRAME_POINTER_REGNUM
#endif
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
#endif
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
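
/* For instance, on a hypothetical 32-bit target where DImode occupies two
   hard registers, encountering (reg:DI 4) here marks both hard regs 4 and
   5 in newpat_used_regs; add_to_hard_reg_set does the widening based on
   the mode.  (Register numbers are illustrative.)  */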
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

static rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
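
/* Typical use, as a simplified sketch (NEW_HOME is a hypothetical insn
   standing in for wherever the note should now live):

	rtx note = remove_death (regno, where_dead);
	if (note)
	  add_reg_note (new_home, REG_DEAD, XEXP (note, 0));

   move_deaths below does essentially this, with extra care for
   multi-register hard regs and for notes that must be re-threaded onto a
   pending list instead of attached directly.  */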
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
		  > GET_MODE_SIZE (GET_MODE (x))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
			    < GET_MODE_SIZE (GET_MODE (x)))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && (((GET_MODE_SIZE (GET_MODE (dest))
		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
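
/* Worked example (insn numbers invented).  Combining

	(insn 10 (set (reg:SI 100) ...) (expr_list:REG_DEAD (reg:SI 99) ...))
	(insn 11 (set (reg:SI 101) (... (reg:SI 100) ...)))

   into a single insn 11 moves the use of reg 99 forward, so its death note
   must move too: move_deaths detaches the REG_DEAD note from insn 10 and
   threads it onto *PNOTES, and distribute_notes later places it on the
   combined insn.  */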
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
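
/* Example (register numbers are illustrative): with BODY

	(set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
	     (reg:SI 101))

   reg_bitfield_target_p returns 1 for (reg:SI 100) -- only part of the
   register is written, so treating this insn as a full definition or
   death point of reg 100 would be wrong.  */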
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      int old_size, args_size = INTVAL (XEXP (note, 0));
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (old_size != args_size
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for registers which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;
	      tem_insn = i3;
	      /* If the new I2 sets the same register that is marked dead
		 in the note, the note now should not be put on I2, as the
		 note refers to a previous incarnation of the reg.  */
	      if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		tem_insn = i2;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
#if HAVE_cc0
			  && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
			      || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				  && sets_cc0_p (PATTERN (cc0_setter)) > 0))
#endif
			  )
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

#if HAVE_cc0
			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
#endif
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if it is totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.] */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs[i][reg_raw_mode[i]])
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;
	}

      if (place2)
	add_shallow_copy_of_reg_note (place2, note);
    }
}
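
/* A small worked case (invented insns).  If I2 died in the combination and
   carried (expr_list:REG_DEAD (reg:SI 99)), and the combined I3 now reads
   reg 99, the REG_DEAD case above finds reg 99 referenced in PATTERN (I3)
   and sets PLACE = I3, so the note is re-threaded onto I3's REG_NOTES at
   the bottom of the loop.  If instead reg 99 vanished from the combination
   entirely, the backward scan looks for an earlier use to carry the note,
   or downgrades the setter's note to REG_UNUSED.  */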
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
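
/* Example (invented insns): before combining, insn 12 held a LOG_LINK to
   insn 10, which set reg 100.  If the combination moves the only use of
   reg 100 to insn 14, the link is re-homed by scanning forward from
   insn 10 for the first insn mentioning reg 100; the scan stops at insn 14,
   and the link is pushed onto LOG_LINKS (insn 14) unless an equivalent
   link is already there.  */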
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
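
/* For example (register numbers are illustrative), with
   EXPR  (plus:SI (reg:SI 100) (reg:SI 101)) and
   EQUIV (plus:SI (reg:SI 100) (const_int 7)),
   every REG/MEM in EQUIV also appears in EXPR, so the result is false and
   EQUIV is an acceptable specialization.  Swap the two arguments and
   reg 101 is unmentioned, so the result is true.  */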
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}