/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.
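
   As an illustrative sketch (not the output of any particular target),
   a linked pair such as

       (set (reg:SI 100) (ashift:SI (reg:SI 99) (const_int 2)))
       (set (reg:SI 101) (plus:SI (reg:SI 100) (reg:SI 98)))

   may be combined into the single insn

       (set (reg:SI 101) (plus:SI (ashift:SI (reg:SI 99) (const_int 2))
                                  (reg:SI 98)))

   provided the machine description recognizes the combined pattern.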
   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
         to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value             the last value assigned
     last_set_label             records the value of label_tick when the
                                register was assigned
     last_set_table_tick        records the value of label_tick when a
                                value using the register is assigned
     last_set_invalid           set to nonzero when it is not valid
                                to use the value of this register in some
                                register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */
  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */
  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
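
/* An illustrative sketch of how this information pays off: if
   reg_stat[R].nonzero_bits for a register R loaded with zero-extending
   QImode loads is 0xff, then a later (and:SI (reg:SI R) (const_int 255))
   is redundant and can be simplified to just (reg:SI R), since every bit
   outside the low eight is already known to be zero.  */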
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)  (uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)  (uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN) \
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
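
/* A minimal usage sketch of the iterator above, as it appears throughout
   this file (the variable names are illustrative only):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->regno == REGNO (dest))
         ... link->insn is the insn that last set link->regno ...  */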
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
                                          sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
                                         machine_mode,
                                         unsigned HOST_WIDE_INT,
                                         unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                          rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                              int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
                            rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
                              unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, machine_mode,
                          unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (machine_mode, rtx,
                                     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, machine_mode, rtx,
                                   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
                            HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
                                 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
                                             rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
                              rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
                                bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN can not be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn uses DEST if
         it is mentioned in the destination or the source.  Otherwise, we
         need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
          && GET_CODE (SET_DEST (x)) != PC
          && !REG_P (SET_DEST (x))
          && ! (GET_CODE (SET_DEST (x)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (x)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
        break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (dest == XEXP (x, i)
              || (REG_P (dest) && REG_P (XEXP (x, i))
                  && REGNO (dest) == REGNO (XEXP (x, i))))
            this_result = loc;
          else
            this_result = find_single_use_1 (dest, &XEXP (x, i));

          if (result == NULL)
            result = this_result;
          else if (this_result)
            /* Duplicate usage.  */
            return NULL;
        }
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            {
              if (XVECEXP (x, i, j) == dest
                  || (REG_P (dest)
                      && REG_P (XVECEXP (x, i, j))
                      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
                this_result = &XVECEXP (x, i, j);
              else
                this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

              if (result == NULL)
                result = this_result;
              else if (this_result)
                /* Duplicate usage.  */
                return NULL;
            }
        }
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
          || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
        return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
        *ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
        FOR_EACH_LOG_LINK (link, next)
          if (link->insn == insn && link->regno == REGNO (dest))
            break;

        if (link)
          {
            result = find_single_use_1 (dest, &PATTERN (next));
            if (ploc)
              *ploc = next;
            return result;
          }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
         that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
                  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
         CONST_INT is not valid, because after the replacement, the
         original mode would be gone.  Unfortunately, we can't tell
         when do_SUBST is called to replace the operand thereof, so we
         perform this test on oldval instead, checking whether an
         invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
                    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
                    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)  do_SUBST (&(INTO), (NEWVAL))
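
/* A sketch of how the SUBST machinery is typically used (illustrative
   only; the variable names are hypothetical):

     SUBST (SET_SRC (pat), simplified_src);
     if (! recog_for_combine (&pat, insn, &notes))
       undo_all ();   ... every recorded *where is restored ...

   Each SUBST records the old contents in undobuf, so a failed match can
   be rolled back wholesale.  */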
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
                       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
        {
          i0_cost = INSN_COST (i0);
          old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
        }
      else
        {
          old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i1_cost + i2_cost + i3_cost : 0);
          i0_cost = 0;
        }
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
                 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
        {
          old_cost += old_other_cost;
          new_cost += new_other_cost;
        }
      else
        old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
               reject ? "rejecting" : "allowing");
      if (i0)
        fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
        fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
        fprintf (dump_file, "replacement costs %d + %d = %d\n",
                 new_i2_cost, new_i3_cost, new_cost);
      else
        fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
        INSN_COST (i0) = 0;
    }

  return true;
}
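
/* Worked example of the rule above (purely illustrative numbers): merging
   I2 and I3 with costs 4 and 4 into a single NEWPAT of cost 8 is allowed
   (8 <= 4 + 4), while a NEWPAT of cost 12 would be rejected, assuming
   every cost involved is known (greater than zero).  */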
/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
        {
          next = NEXT_INSN (insn);
          if (INSN_P (insn) && noop_move_p (insn))
            {
              if (dump_file)
                fprintf (dump_file, "deleting noop move %d\n",
                         INSN_UID (insn));

              delete_insn_and_edges (insn);
            }
        }
    }
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
          && regno == HARD_FRAME_POINTER_REGNUM
          && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
          && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          /* Log links are created only once.  */
          gcc_assert (!LOG_LINKS (insn));

          FOR_EACH_INSN_DEF (def, insn)
            {
              unsigned int regno = DF_REF_REGNO (def);
              rtx_insn *use_insn;

              if (!next_use[regno])
                continue;

              if (!can_combine_def_p (def))
                continue;

              use_insn = next_use[regno];
              next_use[regno] = NULL;

              if (BLOCK_FOR_INSN (use_insn) != bb)
                continue;

              /* flow.c claimed:

                 We don't build a LOG_LINK for hard registers contained
                 in ASM_OPERANDs.  If these registers get replaced,
                 we might wind up changing the semantics of the insn,
                 even if reload can make what appear to be valid
                 assignments later.  */
              if (regno < FIRST_PSEUDO_REGISTER
                  && asm_noperands (PATTERN (use_insn)) >= 0)
                continue;

              /* Don't add duplicate links between instructions.  */
              struct insn_link *links;
              FOR_EACH_LOG_LINK (links, use_insn)
                if (insn == links->insn && regno == links->regno)
                  break;

              if (!links)
                LOG_LINKS (use_insn)
                  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
            }

          FOR_EACH_INSN_USE (use, insn)
            if (can_combine_use_p (use))
              next_use[DF_REF_REGNO (use)] = insn;
        }
    }

  free (next_use);
}
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
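
/* Sketch: given the adjacent insns

     A: (set (reg:SI 100) ...)
     B: (set (reg:SI 101) (plus:SI (reg:SI 100) ...))

   create_log_links gives B a link {insn = A, regno = 100}, so
   insn_a_feeds_b (A, B) returns true.  Register numbers illustrative.  */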
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
          {
            rtx links;

            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

            note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
                         insn);
            record_dead_and_set_regs (insn);

            if (AUTO_INC_DEC)
              for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
                if (REG_NOTE_KIND (links) == REG_INC)
                  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
                                                    insn);

            /* Record the current insn_rtx_cost of this instruction.  */
            if (NONJUMP_INSN_P (insn))
              INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
                                                optimize_this_for_speed_p);
            if (dump_file)
              {
                fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
                dump_insn_slim (dump_file, insn);
              }
          }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
           insn != NEXT_INSN (BB_END (this_basic_block));
           insn = next ? next : NEXT_INSN (insn))
        {
          next = 0;
          if (!NONDEBUG_INSN_P (insn))
            continue;

          while (last_combined_insn
                 && (!NONDEBUG_INSN_P (last_combined_insn)
                     || last_combined_insn->deleted ()))
            last_combined_insn = PREV_INSN (last_combined_insn);
          if (last_combined_insn == NULL_RTX
              || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
              || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
            last_combined_insn = insn;

          /* See if we know about function return values before this
             insn based upon SUBREG flags.  */
          check_promoted_subreg (insn, PATTERN (insn));

          /* See if we can find hardregs and subreg of pseudos in
             narrower modes.  This could help turning TRUNCATEs
             into SUBREGs.  */
          note_uses (&PATTERN (insn), record_truncated_values, NULL);

          /* Try this insn with each insn it links back to.  */

          FOR_EACH_LOG_LINK (links, insn)
            if ((next = try_combine (insn, links->insn, NULL,
                                     NULL, &new_direct_jump_p,
                                     last_combined_insn)) != 0)
              {
                statistics_counter_event (cfun, "two-insn combine", 1);
                goto retry;
              }

          /* Try each sequence of three linked insns ending with this one.  */

          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              {
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (nextlinks, link)
                  if ((next = try_combine (insn, link, nextlinks->insn,
                                           NULL, &new_direct_jump_p,
                                           last_combined_insn)) != 0)
                    {
                      statistics_counter_event (cfun, "three-insn combine", 1);
                      goto retry;
                    }
              }

          /* Try to combine a jump insn that uses CC0
             with a preceding insn that sets CC0, and maybe with its
             logical predecessor as well.
             This is how we make decrement-and-branch insns.
             We need this special code because data flow connections
             via CC0 do not get entered in LOG_LINKS.  */

          if (HAVE_cc0
              && JUMP_P (insn)
              && (prev = prev_nonnote_insn (insn)) != 0
              && NONJUMP_INSN_P (prev)
              && sets_cc0_p (PATTERN (prev)))
            {
              if ((next = try_combine (insn, prev, NULL, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                goto retry;

              FOR_EACH_LOG_LINK (nextlinks, prev)
                if ((next = try_combine (insn, prev, nextlinks->insn,
                                         NULL, &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  goto retry;
            }

          /* Do the same for an insn that explicitly references CC0.  */
          if (HAVE_cc0 && NONJUMP_INSN_P (insn)
              && (prev = prev_nonnote_insn (insn)) != 0
              && NONJUMP_INSN_P (prev)
              && sets_cc0_p (PATTERN (prev))
              && GET_CODE (PATTERN (insn)) == SET
              && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
            {
              if ((next = try_combine (insn, prev, NULL, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                goto retry;

              FOR_EACH_LOG_LINK (nextlinks, prev)
                if ((next = try_combine (insn, prev, nextlinks->insn,
                                         NULL, &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  goto retry;
            }

          /* Finally, see if any of the insns that this insn links to
             explicitly references CC0.  If so, try this insn, that insn,
             and its predecessor if it sets CC0.  */
          if (HAVE_cc0)
            FOR_EACH_LOG_LINK (links, insn)
              if (NONJUMP_INSN_P (links->insn)
                  && GET_CODE (PATTERN (links->insn)) == SET
                  && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
                  && (prev = prev_nonnote_insn (links->insn)) != 0
                  && NONJUMP_INSN_P (prev)
                  && sets_cc0_p (PATTERN (prev))
                  && (next = try_combine (insn, links->insn,
                                          prev, NULL, &new_direct_jump_p,
                                          last_combined_insn)) != 0)
                goto retry;

          /* Try combining an insn with two different insns whose results it
             uses.  */
          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              for (nextlinks = links->next; nextlinks;
                   nextlinks = nextlinks->next)
                if ((next = try_combine (insn, links->insn,
                                         nextlinks->insn, NULL,
                                         &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  {
                    statistics_counter_event (cfun, "three-insn combine", 1);
                    goto retry;
                  }

          /* Try four-instruction combinations.  */
          if (max_combine >= 4)
            FOR_EACH_LOG_LINK (links, insn)
              {
                struct insn_link *next1;
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (next1, link)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I1 -> I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0, I1 -> I2, I2 -> I3.  */
                    for (nextlinks = next1->next; nextlinks;
                         nextlinks = nextlinks->next)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                  }

                for (next1 = links->next; next1; next1 = next1->next)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I2; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0 -> I1; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                  }
              }

          /* Try this insn with each REG_EQUAL note it links back to.  */
          FOR_EACH_LOG_LINK (links, insn)
            {
              rtx set, note;
              rtx_insn *temp = links->insn;
              if ((set = single_set (temp)) != 0
                  && (note = find_reg_equal_equiv_note (temp)) != 0
                  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
                  /* Avoid using a register that may already been marked
                     dead by an earlier instruction.  */
                  && ! unmentioned_reg_p (note, SET_SRC (set))
                  && (GET_MODE (note) == VOIDmode
                      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
                      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
                         && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
                             || (GET_MODE (XEXP (SET_DEST (set), 0))
                                 == GET_MODE (note))))))
                {
                  /* Temporarily replace the set's source with the
                     contents of the REG_EQUAL note.  The insn will
                     be deleted or recognized by try_combine.  */
                  rtx orig_src = SET_SRC (set);
                  rtx orig_dest = SET_DEST (set);
                  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
                    SET_DEST (set) = XEXP (SET_DEST (set), 0);
                  SET_SRC (set) = note;
                  i2mod = temp;
                  i2mod_old_rhs = copy_rtx (orig_src);
                  i2mod_new_rhs = copy_rtx (note);
                  next = try_combine (insn, i2mod, NULL, NULL,
                                      &new_direct_jump_p,
                                      last_combined_insn);
                  i2mod = NULL;
                  if (next)
                    {
                      statistics_counter_event (cfun,
                                                "insn-with-note combine", 1);
                      goto retry;
                    }
                  SET_SRC (set) = orig_src;
                  SET_DEST (set) = orig_dest;
                }
            }

          if (!NOTE_P (insn))
            record_dead_and_set_regs (insn);

retry:
          ;
        }
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
        next = undo->next;
        free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
        continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
         take into account the exporting of a function via taking its
         address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
         see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
                                     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
         (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
        continue;
      /* (b) The mode of the register is the same as the mode of
             the argument as it is passed;  */
      if (mode3 != mode4)
        continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
        ;
      /* (c.1) All callers are from the current compilation unit.  If that's
         the case we don't have to rely on an ABI, we only have to know
         what we're generating right now, and we know that we will do the
         mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
        continue;
      /* (c.2) The combination of the two promotions is useful.  This is
         true when the signs match, or if the first promotion is unsigned.
         In the latter case, (sign_extend (zero_extend x)) is the same as
         (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
        uns3 = true;
      else if (uns3)
        continue;

      /* Record that the value was promoted from mode1 to mode3,
         so that any sign extension at the head of the current
         function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
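
/* For instance, on a target where a signed char argument is passed
   promoted to SImode, the value recorded for the incoming register would
   look like (sign_extend:SI (clobber:QI (const_int 0))) -- an illustrative
   sketch of the CLOBBER-wrapped form built above.  */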
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  if (GET_MODE_PRECISION (mode) < prec
      && CONST_INT_P (src)
      && INTVAL (src) > 0
      && val_signbit_known_set_p (mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));

  return src;
}
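
/* Worked example: with MODE == QImode (precision 8) and PREC == 32, the
   constant 0x80 has the QImode sign bit set, so it becomes
   0x80 | ~0xff, i.e. ...ffffff80 -- the value the hardware would produce
   after sign-extending the byte.  */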
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
                           rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
        reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
                                           BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
        bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
        {
          unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
          if (num == 0 || numeq > num)
            num = numeq;
        }
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
        rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
         say what its contents were.  */
      && ! REGNO_REG_SET_P
           (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
          return;
        }

      /* If this register is being initialized using itself, and the
         register is uninitialized in this basic block, and there are
         no LOG_LINKS which set the register, then part of the
         register is uninitialized.  In that case we can't assume
         anything about the number of nonzero bits.

         ??? We could do better if we checked this in
         reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
         could avoid making assumptions about the insn which initially
         sets the register, while still using the information in other
         insns.  We would have to be careful to check every insn
         involved in the combination.  */

      if (insn
          && reg_referenced_p (x, PATTERN (insn))
          && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
                               REGNO (x)))
        {
          struct insn_link *link;

          FOR_EACH_LOG_LINK (link, insn)
            if (dead_or_set_p (link->insn, x))
              break;
          if (!link)
            {
              rsp->nonzero_bits = GET_MODE_MASK (mode);
              rsp->sign_bit_copies = 1;
              return;
            }
        }

      /* If this is a complex assignment, see if we can convert it into a
         simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
         set what we know about X.  */

      if (SET_DEST (set) == x
          || (paradoxical_subreg_p (SET_DEST (set))
              && SUBREG_REG (SET_DEST (set)) == x))
        update_rsp_from_reg_equal (rsp, insn, set, x);
      else
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
        }
    }
}
1779 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1780 optionally insns that were previously combined into I3 or that will be
1781 combined into the merger of INSN and I3. The order is PRED, PRED2,
1782 INSN, SUCC, SUCC2, I3.
1784 Return 0 if the combination is not allowed for any reason.
1786 If the combination is allowed, *PDEST will be set to the single
1787 destination of INSN and *PSRC to the single source, and this function
1791 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1792 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1793 rtx
*pdest
, rtx
*psrc
)
1800 bool all_adjacent
= true;
1801 int (*is_volatile_p
) (const_rtx
);
1807 if (next_active_insn (succ2
) != i3
)
1808 all_adjacent
= false;
1809 if (next_active_insn (succ
) != succ2
)
1810 all_adjacent
= false;
1812 else if (next_active_insn (succ
) != i3
)
1813 all_adjacent
= false;
1814 if (next_active_insn (insn
) != succ
)
1815 all_adjacent
= false;
1817 else if (next_active_insn (insn
) != i3
)
1818 all_adjacent
= false;
1820 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1821 or a PARALLEL consisting of such a SET and CLOBBERs.
1823 If INSN has CLOBBER parallel parts, ignore them for our processing.
1824 By definition, these happen during the execution of the insn. When it
1825 is merged with another insn, all bets are off. If they are, in fact,
1826 needed and aren't also supplied in I3, they may be added by
1827 recog_for_combine. Otherwise, it won't match.
  /* We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	{
	  rtx elt = XVECEXP (PATTERN (insn), 0, i);

	  switch (GET_CODE (elt))
	    {
	    /* This is important to combine floating point insns
	       for the SH4 port.  */
	    case USE:
	      /* Combining an isolated USE doesn't make sense.
		 We depend here on combinable_i3pat to reject them.  */
	      /* The code below this loop only verifies that the inputs of
		 the SET in INSN do not change.  We call reg_set_between_p
		 to verify that the REG in the USE does not change between
		 INSN and I3.
		 If the USE in INSN was for a pseudo register, the matching
		 insn pattern will likely match any register; combining this
		 with any other USE would only be safe if we knew that the
		 used registers have identical values, or if there was
		 something to tell them apart, e.g. different modes.  For
		 now, we forgo such complicated tests and simply disallow
		 combining of USES of pseudo registers with any other USE.  */
	      if (REG_P (XEXP (elt, 0))
		  && GET_CODE (PATTERN (i3)) == PARALLEL)
		{
		  rtx i3pat = PATTERN (i3);
		  int i = XVECLEN (i3pat, 0) - 1;
		  unsigned int regno = REGNO (XEXP (elt, 0));

		  do
		    {
		      rtx i3elt = XVECEXP (i3pat, 0, i);

		      if (GET_CODE (i3elt) == USE
			  && REG_P (XEXP (i3elt, 0))
			  && (REGNO (XEXP (i3elt, 0)) == regno
			      ? reg_set_between_p (XEXP (elt, 0),
						   PREV_INSN (insn), i3)
			      : regno >= FIRST_PSEUDO_REGISTER))
			return 0;
		    }
		  while (--i >= 0);
		}
	      break;

	      /* We can ignore CLOBBERs.  */
	    case CLOBBER:
	      break;

	    case SET:
	      /* Ignore SETs whose result isn't used but not those that
		 have side-effects.  */
	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
		  && insn_nothrow_p (insn)
		  && !side_effects_p (elt))
		break;

	      /* If we have already found a SET, this is a second one and
		 so we cannot combine with this insn.  */
	      if (set)
		return 0;

	      set = elt;
	      break;

	    default:
	      /* Anything else means we can't combine.  */
	      return 0;
	    }
	}

      if (set == 0
	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
	     so don't do anything with it.  */
	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
	return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;

  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate a user-specified register if it is in an
     asm input, because we may break the register asm usage defined
     in the GCC manual if we allow that.
     Be aware that this may cover more cases than we expect, but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;


  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}

  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
		  ? volatile_refs_p
		  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;

  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (JUMP_P (i3)
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	      || (pred2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	      || (succ != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	      || (succ2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
	return 0;

  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
	  && ! all_adjacent)
	return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = x;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
					GET_MODE (inner_dest))))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
	return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (subdest))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
	  && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
	      || (REGNO (subdest) != ARG_POINTER_REGNUM
		  || ! fixed_regs[REGNO (subdest)]))
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }

  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   and division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	  || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
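
/* For illustration only (this example is not in the original source):
   contains_muldiv returns 1 for (plus (mult (reg A) (reg B)) (reg C)),
   since the inner MULT is by a non-constant, but returns 0 for
   (mult (reg A) (const_int 4)), a multiplication by a power of two that
   will be implemented as a shift.  */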
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS
						 (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
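
/* A worked example, added for illustration only: if INFO tracks a return
   value in hard regs 10..13, info->mask starts as 0b1111, bit k standing
   for reg 10 + k.  A store to a two-register value X at regno 12 yields
   new_mask = 0b11 shifted left by 12 - 10 = 0b1100, so the bits for
   regs 12 and 13 are cleared and only regs 10 and 11 remain (probably)
   live.  */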
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider to combine into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
	    && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !added_sets
	  && !REG_USERVAR_P (x));
}
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
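
/* Illustrative example only: with REG = (reg:SI 100), reg_subword_p is
   true for X = (subreg:HI (reg:SI 100) 0) and for
   X = (strict_low_part (subreg:HI (reg:SI 100) 0)), but false when X is
   the full (reg:SI 100) or when REG has a non-integer mode.  */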
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
	  break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}
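
/* For example (illustration only), with N = 2 this accepts
	(parallel [(set (reg:SI 100) (plus:SI (reg:SI 1) (reg:SI 2)))
		   (set (reg:SI 101) (reg:SI 3))
		   (clobber (reg:CC 17))])
   but rejects the same pattern for N = 3, and rejects any PARALLEL whose
   trailing elements are not CLOBBERs.  */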
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
	return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
	  return false;
    }

  return true;
}
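
/* Illustration only: the two SETs in
	(parallel [(set (reg:SI 100) (reg:SI 101))
		   (set (reg:SI 101) (reg:SI 100))])
   cannot be emitted one after the other, since the second SET reads the
   register written by the first; reg_referenced_p detects this and we
   return false.  */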
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;

  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;
  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	     an extension.  */
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
	ngood += 2;

      if (ngood < 2 && nshift < 2)
	return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;
  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));
    }

  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.

	 Neither can this PARALLEL be an asm.  We do not allow combining
	 that usually (see can_combine_p), so do not here either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
	{
	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					  SET_DEST (XVECEXP (p2, 0, i))))
	    ok = false;
	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
	    ok = false;
	}

      if (ok)
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      combine_merges++;

	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
	      i2src = SET_SRC (XVECEXP (p2, 0, i));
	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we validate
		 the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (CONST_INT_P (XEXP (dest, 1))
	      && CONST_INT_P (XEXP (dest, 2)))
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  width = GET_MODE_PRECISION (GET_MODE (dest));
	  offset = 0;
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
	    offset += GET_MODE_PRECISION (GET_MODE (dest));
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0)
	{
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp_expr);

	  wide_int o
	    = wi::insert (rtx_mode_t (outer, GET_MODE (SET_DEST (temp_expr))),
			  rtx_mode_t (inner, GET_MODE (dest)),
			  offset, width);

	  combine_merges++;
	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
	  i2dest = SET_DEST (temp_expr);
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  /* Replace the source in I2 with the new constant and make the
	     resulting insn the new pattern for I3.  Then skip to where we
	     validate the pattern.  Everything was set up above.  */
	  SUBST (SET_SRC (temp_expr),
		 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));

	  newpat = PATTERN (i2);

	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
	  goto validate_replacement;
	}
    }
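
  /* A sketch of the merge above, for illustration only and assuming a
     little-endian target where (subreg:HI ... 0) is the low part: if I2 is
     (set (reg:SI 100) (const_int 0x12345678)) and I3 is
     (set (subreg:HI (reg:SI 100) 0) (const_int 0xABCD)), the wi::insert
     call computes 0x1234ABCD and the rewritten
     (set (reg:SI 100) (const_int 0x1234ABCD)) serves as the new pattern
     for I3.  */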
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
	     SET_DEST (PATTERN (i1)));
      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
				 &i1dest, &i1src))
      || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
				 &i0dest, &i0src)))
    {
      undo_all ();
      return 0;
    }
3001 cases. Knowing this will help in register status updating below. */
3002 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
3003 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
3004 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
3005 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
3006 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
3007 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
3008 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
3009 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
3010 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
3012 /* For the earlier insns, determine which of the subsequent ones they
3014 i1_feeds_i2_n
= i1
&& insn_a_feeds_b (i1
, i2
);
3015 i0_feeds_i1_n
= i0
&& insn_a_feeds_b (i0
, i1
);
3016 i0_feeds_i2_n
= (i0
&& (!i0_feeds_i1_n
? insn_a_feeds_b (i0
, i2
)
3017 : (!reg_overlap_mentioned_p (i1dest
, i0dest
)
3018 && reg_overlap_mentioned_p (i0dest
, i2src
))));
3020 /* Ensure that I3's pattern can be the destination of combines. */
3021 if (! combinable_i3pat (i3
, &PATTERN (i3
), i2dest
, i1dest
, i0dest
,
3022 i1
&& i2dest_in_i1src
&& !i1_feeds_i2_n
,
3023 i0
&& ((i2dest_in_i0src
&& !i0_feeds_i2_n
)
3024 || (i1dest_in_i0src
&& !i0_feeds_i1_n
)),
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
	mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
  if (AUTO_INC_DEC)
    {
      rtx link;
      for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == REG_INC
	    && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
		|| (i1 != 0
		    && reg_overlap_mentioned_p (XEXP (link, 0),
						PATTERN (i1)))))
	  {
	    undo_all ();
	    return 0;
	  }
    }
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
		     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
		     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			 && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
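
  /* For instance (illustration only): if I1 feeds I2 which feeds I3 and
     I1DEST dies or is set in I2, added_sets_1 is 0 and I1's SET can simply
     disappear.  If instead I1DEST is still live past I3, added_sets_1 is 1
     and I1's SET is re-emitted by the PARALLEL construction further
     below.  */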
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }

  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
	i0pat = copy_rtx (PATTERN (i0));
    }
  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;

  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
					    &cc_use_insn)))
	{
	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
	  compare_code = simplify_compare_const (compare_code,
						 GET_MODE (i2dest), op0, &op1);
	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
	}

      /* Do the rest only if op1 is const0_rtx, which may be the
	 result of simplification.  */
      if (op1 == const0_rtx)
	{
	  /* If a single use of the CC is found, prepare to modify it
	     when SELECT_CC_MODE returns a new CC-class mode, or when
	     the above simplify_compare_const() returned a new comparison
	     operator.  undobuf.other_insn is assigned the CC use insn
	     when modifying it.  */
	  if (cc_use_loc)
	    {
#ifdef SELECT_CC_MODE
	      machine_mode new_mode
		= SELECT_CC_MODE (compare_code, op0, op1);
	      if (new_mode != orig_compare_mode
		  && can_change_dest_mode (SET_DEST (newpat),
					   added_sets_2, new_mode))
		{
		  unsigned int regno = REGNO (newpat_dest);
		  compare_mode = new_mode;
		  if (regno < FIRST_PSEUDO_REGISTER)
		    newpat_dest = gen_rtx_REG (compare_mode, regno);
		  else
		    {
		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		      newpat_dest = regno_reg_rtx[regno];
		    }
		}
#endif
	      /* Cases for modifying the CC-using comparison.  */
	      if (compare_code != orig_compare_code
		  /* ??? Do we need to verify the zero rtx?  */
		  && XEXP (*cc_use_loc, 1) == const0_rtx)
		{
		  /* Replace cc_use_loc with entire new RTX.  */
		  SUBST (*cc_use_loc,
			 gen_rtx_fmt_ee (compare_code, compare_mode,
					 newpat_dest, const0_rtx));
		  undobuf.other_insn = cc_use_insn;
		}
	      else if (compare_mode != orig_compare_mode)
		{
		  /* Just replace the CC reg with a new mode.  */
		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
		  undobuf.other_insn = cc_use_insn;
		}
	    }

	  /* Now we modify the current newpat:
	     First, SET_DEST(newpat) is updated if the CC mode has been
	     altered.  For targets without SELECT_CC_MODE, this should be
	     optimized away.  */
	  if (compare_mode != orig_compare_mode)
	    SUBST (SET_DEST (newpat), newpat_dest);
	  /* This is always done to propagate i2src into newpat.  */
	  SUBST (SET_SRC (newpat),
		 gen_rtx_COMPARE (compare_mode, op0, op1));
	  /* Create new version of i2pat if needed; the below PARALLEL
	     creation needs this to work correctly.  */
	  if (! rtx_equal_p (i2src, op0))
	    i2pat = gen_rtx_SET (i2dest, op0);
	  i2_is_used = 1;
	}
    }
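
  /* Sketch of the transformation above, for illustration only: with
     I2 = (set (reg:SI 100) (minus:SI (reg:SI 1) (reg:SI 2))) and
     I3 = (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0))),
     NEWPAT becomes
	(set (reg:CC 17) (compare:CC (minus:SI (reg:SI 1) (reg:SI 2))
				     (const_int 0)))
     and, if reg 100 is still needed, the PARALLEL built later re-adds
     (set (reg:SI 100) (minus:SI (reg:SI 1) (reg:SI 2))).  */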
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
	    }

	  subst_low_luid = DF_INSN_LUID (i2);
	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
	}

      n_occurrences = 0;		/* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
	 copy of I2SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I1SRC for I1DEST
	 later.  Likewise if I0 feeds into I2, either directly or indirectly
	 through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
		      (i1_feeds_i2_n && i1dest_in_i1src)
		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			  && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
	 This happens if I1DEST is mentioned in I2 and dies there, and
	 has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	   && i1_feeds_i2_n
	   && dead_or_set_p (i2, i1dest)
	   && !reg_overlap_mentioned_p (i1dest, newpat))
	  /* Before we can do this substitution, we must redo the test done
	     above (see detailed comments there) that ensures I1DEST isn't
	     mentioned in any SETs in NEWPAT that are field assignments.  */
	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
	 for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
	i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
	 copy of I1SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I0SRC for I0DEST
	 later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
		      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
	   && !reg_overlap_mentioned_p (i0dest, newpat))
	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      /* If the following substitution will modify I0SRC, make a copy of it
	 for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
	i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
			   || (i0_feeds_i2_n)))
	i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
	      > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	  && (n_occurrences + added_sets_0
	      + (added_sets_1 && i0_feeds_i1_n)
	      + (added_sets_2 && i0_feeds_i2_n)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      combine_extras++;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_0)
	XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
	{
	  rtx t = i1pat;
	  if (i0_feeds_i1_n)
	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
      if (added_sets_2)
	{
	  rtx t = i2pat;
	  if (i1_feeds_i2_n)
	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
		       i0_feeds_i1_n && i0dest_in_i0src);
	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
    }
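
  /* Illustrative example only: combining
     I2 = (set (reg 100) (plus (reg 1) (reg 2))) into
     I3 = (set (reg 3) (mult (reg 100) (reg 4))) while reg 100 remains live
     past I3 yields
	(parallel [(set (reg 3) (mult (plus (reg 1) (reg 2)) (reg 4)))
		   (set (reg 100) (plus (reg 1) (reg 2)))]).  */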
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }

  /* We have recognized nothing yet.  */
  insn_code_number = -1;
  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED,
				 SUBREG_REG (SET_DEST (set1)))))
	  && insn_nothrow_p (i3)
	  && !side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && insn_nothrow_p (i3)
	       && !side_effects_p (SET_SRC (set0)))
	{
	  newpat = set1;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    changed_i3_dest = 1;
	}

      if (insn_code_number < 0)
	newpat = oldpat;
    }

  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
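
  /* For example (illustration only): a divmod combination can leave
	(parallel [(set (reg 100) (div:SI (reg 1) (reg 2)))
		   (set (reg 101) (mod:SI (reg 1) (reg 2)))])
     where I3 carries a REG_UNUSED note for reg 101; the code above then
     keeps just the first SET and tries to recognize it alone.  */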
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */
      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || ! use_crosses_set_p (PATTERN (m_split_insn),
					   DF_INSN_LUID (i2))))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }
	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
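
	  /* For instance (illustrative): (mult FOO (const_int 8)) has
	     exact_log2 (8) == 3 and so is rewritten above as
	     (ashift FOO (const_int 3)); a plain (mult FOO (const_int 6))
	     is left alone, since 6 is not a power of two.  */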
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }
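
	  /* A worked instance of the identity splits above (illustrative
	     register names): for NEWPAT "r3 = (r1+r2) + (r1+r2)" we make
	     NEWI2PAT "newdest = r1+r2" and rewrite NEWPAT as
	     "r3 = newdest + newdest", a pair of insns a target is far
	     more likely to recognize.  */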
	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
      /* Check for a case where we loaded from memory in a narrow mode and
	 then sign extended it, but we need both registers.  In that case,
	 we have a PARALLEL with both loads from the same memory location.
	 We can split this into a load from memory followed by a register-register
	 copy.  This saves at least one insn, more if register allocation can
	 eliminate the copy.

	 We cannot do this if the destination of the first assignment is a
	 condition code register or cc0.  We eliminate this case by making sure
	 the SET_DEST and SET_SRC have the same mode.

	 We cannot do this if the destination of the second assignment is
	 a register that we have already assumed is zero-extended.  Similarly
	 for a SUBREG of such a register.  */

      else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	       && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
		   == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			       XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	       && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				       DF_INSN_LUID (i2))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode))))
	       && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		     && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
			 (REG_P (temp_expr)
			  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
			  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
			  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
			  && (reg_stat[REGNO (temp_expr)].nonzero_bits
			      != GET_MODE_MASK (word_mode)))))
	       && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					     SET_SRC (XVECEXP (newpat, 0, 1)))
	       && ! find_reg_note (i3, REG_UNUSED,
				   SET_DEST (XVECEXP (newpat, 0, 0))))
	{
	  rtx ni2dest;

	  newi2pat = XVECEXP (newpat, 0, 0);
	  ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
	  newpat = XVECEXP (newpat, 0, 1);
	  SUBST (SET_SRC (newpat),
		 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    swap_i2i3 = 1;
	}
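
      /* In other words (illustrative sketch): the unsplittable
	   (parallel [(set r1 (sign_extend (mem M)))
		      (set r2 (mem M))])
	 becomes I2 "r1 = sign_extend (mem M)" and I3 "r2 = lowpart (r1)",
	 turning one of the two memory loads into a register copy.  */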
      /* Similarly, check for a case where we have a PARALLEL of two independent
	 SETs but we started with three insns.  In this case, we can do the sets
	 as two separate insns.  This case occurs when some SET allows two
	 other insns to combine, but the destination of that SET is still live.

	 Also do this if we started with two insns and (at least) one of the
	 resulting sets is a noop; this noop will be deleted later.  */

      else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
		      || set_noop_p (XVECEXP (newpat, 0, 1)))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				      XVECEXP (newpat, 0, 0))
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				      XVECEXP (newpat, 0, 1))
	       && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		     && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
	{
	  rtx set0 = XVECEXP (newpat, 0, 0);
	  rtx set1 = XVECEXP (newpat, 0, 1);

	  /* Normally, it doesn't matter which of the two is done first,
	     but the one that references cc0 can't be the second, and
	     one which uses any regs/memory set in between i2 and i3 can't
	     be first.  The PARALLEL might also have been pre-existing in i3,
	     so we need to make sure that we won't wrongly hoist a SET to i2
	     that would conflict with a death note present in there.  */
	  if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	      && !(REG_P (SET_DEST (set1))
		   && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	      && !(GET_CODE (SET_DEST (set1)) == SUBREG
		   && find_reg_note (i2, REG_DEAD,
				     SUBREG_REG (SET_DEST (set1))))
	      && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	      /* If I3 is a jump, ensure that set0 is a jump so that
		 we do not create invalid RTL.  */
	      && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	    {
	      newi2pat = set1;
	      newpat = set0;
	    }
	  else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
		   && !(REG_P (SET_DEST (set0))
			&& find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
		   && !(GET_CODE (SET_DEST (set0)) == SUBREG
			&& find_reg_note (i2, REG_DEAD,
					  SUBREG_REG (SET_DEST (set0))))
		   && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
		   /* If I3 is a jump, ensure that set1 is a jump so that
		      we do not create invalid RTL.  */
		   && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	    {
	      newi2pat = set0;
	      newpat = set1;
	    }
	  else
	    {
	      undo_all ();
	      return 0;
	    }
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    {
	      /* recog_for_combine might have added CLOBBERs to newi2pat.
		 Make sure NEWPAT does not depend on the clobbered regs.  */
	      if (GET_CODE (newi2pat) == PARALLEL)
		{
		  for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		    if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		      {
			rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
			if (reg_overlap_mentioned_p (reg, newpat))
			  {
			    undo_all ();
			    return 0;
			  }
		      }
		}

	      insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	    }
	}
      /* If it still isn't recognized, fail and change things back the way they
	 were.  */
      if ((insn_code_number < 0
	   /* Is the result a reasonable ASM_OPERANDS?  */
	   && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
	{
	  undo_all ();
	  return 0;
	}
      /* If we had to change another insn, make sure it is valid also.  */
      if (undobuf.other_insn)
	{
	  CLEAR_HARD_REG_SET (newpat_used_regs);

	  other_pat = PATTERN (undobuf.other_insn);
	  other_code_number = recog_for_combine (&other_pat,
						 undobuf.other_insn,
						 &new_other_notes);

	  if (other_code_number < 0 && ! check_asm_operands (other_pat))
	    {
	      undo_all ();
	      return 0;
	    }
	}
      /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
	 they are adjacent to each other or not.  */
      if (HAVE_cc0)
	{
	  rtx_insn *p = prev_nonnote_insn (i3);
	  if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	      && sets_cc0_p (newi2pat))
	    {
	      undo_all ();
	      return 0;
	    }
	}
      /* Only allow this combination if insn_rtx_costs reports that the
	 replacement instructions are cheaper than the originals.  */
      if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
	{
	  undo_all ();
	  return 0;
	}
      if (MAY_HAVE_DEBUG_INSNS)
	{
	  struct undo *undo;

	  for (undo = undobuf.undos; undo; undo = undo->next)
	    if (undo->kind == UNDO_MODE)
	      {
		rtx reg = *undo->where.r;
		machine_mode new_mode = GET_MODE (reg);
		machine_mode old_mode = undo->old_contents.m;

		/* Temporarily revert mode back.  */
		adjust_reg_mode (reg, old_mode);

		if (reg == i2dest && i2scratch)
		  {
		    /* If we used i2dest as a scratch register with a
		       different mode, substitute it for the original
		       i2src while its original mode is temporarily
		       restored, and then clear i2scratch so that we don't
		       do it again later.  */
		    propagate_for_debug (i2, last_combined_insn, reg, i2src,
					 this_basic_block);
		    i2scratch = false;
		    /* Put back the new mode.  */
		    adjust_reg_mode (reg, new_mode);
		  }
		else
		  {
		    rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		    rtx_insn *first, *last;

		    if (reg == i2dest)
		      {
			first = i2;
			last = last_combined_insn;
		      }
		    else
		      {
			first = i3;
			last = undobuf.other_insn;
			gcc_assert (last);
			if (DF_INSN_LUID (last)
			    < DF_INSN_LUID (last_combined_insn))
			  last = last_combined_insn;
		      }

		    /* We're dealing with a reg that changed mode but not
		       meaning, so we want to turn it into a subreg for
		       the new mode.  However, because of REG sharing and
		       because its mode had already changed, we have to do
		       it in two steps.  First, replace any debug uses of
		       reg, with its original mode temporarily restored,
		       with this copy we have created; then, replace the
		       copy with the SUBREG of the original shared reg,
		       once again changed to the new mode.  */
		    propagate_for_debug (first, last, reg, tempreg,
					 this_basic_block);
		    adjust_reg_mode (reg, new_mode);
		    propagate_for_debug (first, last, tempreg,
					 lowpart_subreg (old_mode, reg,
							 new_mode),
					 this_basic_block);
		  }
	      }
	}
      /* If we will be able to accept this, we have made a
	 change to the destination of I3.  This requires us to
	 do a few adjustments.  */

      if (changed_i3_dest)
	{
	  PATTERN (i3) = newpat;
	  adjust_for_new_dest (i3);
	}
      /* We now know that we can do this combination.  Merge the insns and
	 update the status of registers and LOG_LINKS.  */

      if (undobuf.other_insn)
	{
	  rtx note, next;

	  PATTERN (undobuf.other_insn) = other_pat;

	  /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	     ensure that they are still valid.  Then add any non-duplicate
	     notes added by recog_for_combine.  */
	  for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	    {
	      next = XEXP (note, 1);

	      if ((REG_NOTE_KIND (note) == REG_DEAD
		   && !reg_referenced_p (XEXP (note, 0),
					 PATTERN (undobuf.other_insn)))
		  || (REG_NOTE_KIND (note) == REG_UNUSED
		      && !reg_set_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
		  /* Simply drop equal notes since they may be no longer valid
		     for other_insn.  It may be possible to record that the CC
		     register is changed and only discard those notes, but
		     in practice it's unnecessary complication and doesn't
		     give any meaningful improvement.  */
		  || REG_NOTE_KIND (note) == REG_EQUAL
		  || REG_NOTE_KIND (note) == REG_EQUIV)
		remove_note (undobuf.other_insn, note);
	    }

	  distribute_notes (new_other_notes, undobuf.other_insn,
			    undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			    NULL_RTX);
	}
      if (swap_i2i3)
	{
	  rtx_insn *insn;
	  struct insn_link *link;
	  rtx ni2dest;

	  /* I3 now uses what used to be its destination and which is now
	     I2's destination.  This requires us to do a few adjustments.  */
	  PATTERN (i3) = newpat;
	  adjust_for_new_dest (i3);

	  /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	     so we still will.

	     However, some later insn might be using I2's dest and have
	     a LOG_LINK pointing at I3.  We must remove this link.
	     The simplest way to remove the link is to point it at I1,
	     which we know will be a NOTE.  */

	  /* newi2pat is usually a SET here; however, recog_for_combine might
	     have added some clobbers.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
	  else
	    ni2dest = SET_DEST (newi2pat);

	  for (insn = NEXT_INSN (i3);
	       insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			|| insn != BB_HEAD (this_basic_block->next_bb));
	       insn = NEXT_INSN (insn))
	    {
	      if (NONDEBUG_INSN_P (insn)
		  && reg_referenced_p (ni2dest, PATTERN (insn)))
		{
		  FOR_EACH_LOG_LINK (link, insn)
		    if (link->insn == i3)
		      link->insn = i1;

		  break;
		}
	    }
	}
      {
	rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
	struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
	rtx midnotes = 0;
	int from_luid;
	/* Compute which registers we expect to eliminate.  newi2pat may be setting
	   either i3dest or i2dest, so we must check it.  */
	rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		       || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		       || !i2dest_killed
		       ? 0 : i2dest);
	/* For i1, we need to compute both local elimination and global
	   elimination information with respect to newi2pat because i1dest
	   may be the same as i3dest, in which case newi2pat may be setting
	   i1dest.  Global information is used when distributing REG_DEAD
	   note for i2 and i3, in which case it does matter if newi2pat sets
	   i1dest or not.

	   Local information is used when distributing REG_DEAD note for i1,
	   in which case it doesn't matter if newi2pat sets i1dest or not.
	   See PR62151, if we have four insns combination:

	       i0: r0 <- i0src
	       i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	       i2: r0 <- i2src (using r1)
	       i3: r3 <- i3src (using r0)

	   From i1's point of view, r0 is eliminated, no matter if it is set
	   by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
	   should be discarded.

	   Note local information only affects cases in forms like "I1->I2->I3",
	   "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
	   "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
	   i0dest anyway.  */
	rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			     || !i1dest_killed
			     ? 0 : i1dest);
	rtx elim_i1 = (local_elim_i1 == 0
		       || (newi2pat && reg_set_p (i1dest, newi2pat))
		       ? 0 : i1dest);
	/* Same case as i1.  */
	rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			     ? 0 : i0dest);
	rtx elim_i0 = (local_elim_i0 == 0
		       || (newi2pat && reg_set_p (i0dest, newi2pat))
		       ? 0 : i0dest);
	/* Get the old REG_NOTES and LOG_LINKS from all our insns and
	   clear them.  */
	i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
	i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
	if (i1)
	  i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
	if (i0)
	  i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

	/* Ensure that we do not have something that should not be shared but
	   occurs multiple times in the new insns.  Check this by first
	   resetting all the `used' flags and then copying anything that is
	   shared.  */

	reset_used_flags (i3notes);
	reset_used_flags (i2notes);
	reset_used_flags (i1notes);
	reset_used_flags (i0notes);
	reset_used_flags (newpat);
	reset_used_flags (newi2pat);
	if (undobuf.other_insn)
	  reset_used_flags (PATTERN (undobuf.other_insn));

	i3notes = copy_rtx_if_shared (i3notes);
	i2notes = copy_rtx_if_shared (i2notes);
	i1notes = copy_rtx_if_shared (i1notes);
	i0notes = copy_rtx_if_shared (i0notes);
	newpat = copy_rtx_if_shared (newpat);
	newi2pat = copy_rtx_if_shared (newi2pat);
	if (undobuf.other_insn)
	  reset_used_flags (PATTERN (undobuf.other_insn));

	INSN_CODE (i3) = insn_code_number;
	PATTERN (i3) = newpat;
	if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
	  {
	    for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
		 link = XEXP (link, 1))
	      {
		if (substed_i2)
		  {
		    /* I2SRC must still be meaningful at this point.  Some
		       splitting operations can invalidate I2SRC, but those
		       operations do not apply to calls.  */
		    gcc_assert (i2src);
		    XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
							   i2dest, i2src);
		  }
		if (substed_i1)
		  XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
							 i1dest, i1src);
		if (substed_i0)
		  XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
							 i0dest, i0src);
	      }
	  }

	if (undobuf.other_insn)
	  INSN_CODE (undobuf.other_insn) = other_code_number;
	/* We had one special case above where I2 had more than one set and
	   we replaced a destination of one of those sets with the destination
	   of I3.  In that case, we have to update LOG_LINKS of insns later
	   in this basic block.  Note that this (expensive) case is rare.

	   Also, in this case, we must pretend that all REG_NOTEs for I2
	   actually came from I3, so that REG_UNUSED notes from I2 will be
	   properly handled.  */

	if (i3_subst_into_i2)
	  {
	    for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	      if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
		   || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
		  && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
		  && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
		  && ! find_reg_note (i2, REG_UNUSED,
				      SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
		for (temp_insn = NEXT_INSN (i2);
		     temp_insn
		     && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			 || BB_HEAD (this_basic_block) != temp_insn);
		     temp_insn = NEXT_INSN (temp_insn))
		  if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		    FOR_EACH_LOG_LINK (link, temp_insn)
		      if (link->insn == i2)
			link->insn = i3;

	    if (i3notes)
	      {
		rtx link = i3notes;
		while (XEXP (link, 1))
		  link = XEXP (link, 1);
		XEXP (link, 1) = i2notes;
	      }
	    else
	      i3notes = i2notes;
	    i2notes = NULL;
	  }

	LOG_LINKS (i3) = NULL;
	REG_NOTES (i3) = 0;
	LOG_LINKS (i2) = NULL;
	REG_NOTES (i2) = 0;
	if (newi2pat)
	  {
	    if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	      propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
				   this_basic_block);
	    INSN_CODE (i2) = i2_code_number;
	    PATTERN (i2) = newi2pat;
	  }
	else
	  {
	    if (MAY_HAVE_DEBUG_INSNS && i2src)
	      propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
				   this_basic_block);
	    SET_INSN_DELETED (i2);
	  }

	if (i1)
	  {
	    LOG_LINKS (i1) = NULL;
	    REG_NOTES (i1) = 0;
	    if (MAY_HAVE_DEBUG_INSNS)
	      propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
				   this_basic_block);
	    SET_INSN_DELETED (i1);
	  }

	if (i0)
	  {
	    LOG_LINKS (i0) = NULL;
	    REG_NOTES (i0) = 0;
	    if (MAY_HAVE_DEBUG_INSNS)
	      propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
				   this_basic_block);
	    SET_INSN_DELETED (i0);
	  }
	/* Get death notes for everything that is now used in either I3 or
	   I2 and used to die in a previous insn.  If we built two new
	   patterns, move from I1 to I2 then I2 to I3 so that we get the
	   proper movement on registers that I2 modifies.  */

	if (i0)
	  from_luid = DF_INSN_LUID (i0);
	else if (i1)
	  from_luid = DF_INSN_LUID (i1);
	else
	  from_luid = DF_INSN_LUID (i2);
	if (newi2pat)
	  move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
	move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
	/* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
	if (i3notes)
	  distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
	if (i2notes)
	  distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
	if (i1notes)
	  distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			    elim_i2, local_elim_i1, local_elim_i0);
	if (i0notes)
	  distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, local_elim_i0);
	if (midnotes)
	  distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);

	/* Distribute any notes added to I2 or I3 by recog_for_combine.  We
	   know these are REG_UNUSED and want them to go to the desired insn,
	   so we always pass it as i3.  */

	if (newi2pat && new_i2_notes)
	  distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			    NULL_RTX);

	if (new_i3_notes)
	  distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			    NULL_RTX);
	/* If I3DEST was used in I3SRC, it really died in I3.  We may need to
	   put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
	   I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
	   in that case, it might delete I2.  Similarly for I2 and I1.
	   Show an additional death due to the REG_DEAD note we make here.  If
	   we discard it in distribute_notes, we will decrement it again.  */

	if (i3dest_killed)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	    if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, elim_i2,
				elim_i1, elim_i0);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				elim_i2, elim_i1, elim_i0);
	  }

	if (i2dest_in_i2src)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	    if (newi2pat && reg_set_p (i2dest, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
				NULL_RTX, NULL_RTX);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  }

	if (i1dest_in_i1src)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	    if (newi2pat && reg_set_p (i1dest, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
				NULL_RTX, NULL_RTX);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  }

	if (i0dest_in_i0src)
	  {
	    rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	    if (newi2pat && reg_set_p (i0dest, newi2pat))
	      distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
				NULL_RTX, NULL_RTX);
	    else
	      distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  }

	distribute_links (i3links);
	distribute_links (i2links);
	distribute_links (i1links);
	distribute_links (i0links);
	if (REG_P (i2dest))
	  {
	    struct insn_link *link;
	    rtx_insn *i2_insn = 0;
	    rtx i2_val = 0, set;

	    /* The insn that used to set this register doesn't exist, and
	       this life of the register may not exist either.  See if one of
	       I3's links points to an insn that sets I2DEST.  If it does,
	       that is now the last known value for I2DEST.  If we don't update
	       this and I2 set the register to a value that depended on its old
	       contents, we will get confused.  If this insn is used, things
	       will be set correctly in combine_instructions.  */
	    FOR_EACH_LOG_LINK (link, i3)
	      if ((set = single_set (link->insn)) != 0
		  && rtx_equal_p (i2dest, SET_DEST (set)))
		i2_insn = link->insn, i2_val = SET_SRC (set);

	    record_value_for_reg (i2dest, i2_insn, i2_val);

	    /* If the reg formerly set in I2 died only once and that was in I3,
	       zero its use count so it won't make `reload' do any work.  */
	    if (! added_sets_2
		&& (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
		&& ! i2dest_in_i2src
		&& REGNO (i2dest) < reg_n_sets_max)
	      INC_REG_N_SETS (REGNO (i2dest), -1);
	  }
	if (i1 && REG_P (i1dest))
	  {
	    struct insn_link *link;
	    rtx_insn *i1_insn = 0;
	    rtx i1_val = 0, set;

	    FOR_EACH_LOG_LINK (link, i3)
	      if ((set = single_set (link->insn)) != 0
		  && rtx_equal_p (i1dest, SET_DEST (set)))
		i1_insn = link->insn, i1_val = SET_SRC (set);

	    record_value_for_reg (i1dest, i1_insn, i1_val);

	    if (! added_sets_1
		&& ! i1dest_in_i1src
		&& REGNO (i1dest) < reg_n_sets_max)
	      INC_REG_N_SETS (REGNO (i1dest), -1);
	  }

	if (i0 && REG_P (i0dest))
	  {
	    struct insn_link *link;
	    rtx_insn *i0_insn = 0;
	    rtx i0_val = 0, set;

	    FOR_EACH_LOG_LINK (link, i3)
	      if ((set = single_set (link->insn)) != 0
		  && rtx_equal_p (i0dest, SET_DEST (set)))
		i0_insn = link->insn, i0_val = SET_SRC (set);

	    record_value_for_reg (i0dest, i0_insn, i0_val);

	    if (! added_sets_0
		&& ! i0dest_in_i0src
		&& REGNO (i0dest) < reg_n_sets_max)
	      INC_REG_N_SETS (REGNO (i0dest), -1);
	  }
	/* Update reg_stat[].nonzero_bits et al for any changes that may have
	   been made to this insn.  The order is important, because newi2pat
	   can affect nonzero_bits of newpat.  */
	if (newi2pat)
	  note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
	note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
      }
      if (undobuf.other_insn != NULL_RTX)
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "modifying other_insn ");
	      dump_insn_slim (dump_file, undobuf.other_insn);
	    }
	  df_insn_rescan (undobuf.other_insn);
	}

      if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "modifying insn i0 ");
	      dump_insn_slim (dump_file, i0);
	    }
	  df_insn_rescan (i0);
	}

      if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "modifying insn i1 ");
	      dump_insn_slim (dump_file, i1);
	    }
	  df_insn_rescan (i1);
	}

      if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "modifying insn i2 ");
	      dump_insn_slim (dump_file, i2);
	    }
	  df_insn_rescan (i2);
	}

      if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "modifying insn i3 ");
	      dump_insn_slim (dump_file, i3);
	    }
	  df_insn_rescan (i3);
	}
      /* Set new_direct_jump_p if a new return or simple jump instruction
	 has been created.  Adjust the CFG accordingly.  */
      if (returnjump_p (i3) || any_uncondjump_p (i3))
	{
	  *new_direct_jump_p = 1;
	  mark_jump_label (PATTERN (i3), i3, 0);
	  update_cfg_for_uncondjump (i3);
	}

      if (undobuf.other_insn != NULL_RTX
	  && (returnjump_p (undobuf.other_insn)
	      || any_uncondjump_p (undobuf.other_insn)))
	{
	  *new_direct_jump_p = 1;
	  update_cfg_for_uncondjump (undobuf.other_insn);
	}

      if (GET_CODE (PATTERN (i3)) == TRAP_IF
	  && XEXP (PATTERN (i3), 0) == const1_rtx)
	{
	  basic_block bb = BLOCK_FOR_INSN (i3);
	  gcc_assert (bb);
	  remove_edge (split_block (bb, i3));
	  emit_barrier_after_bb (bb);
	  *new_direct_jump_p = 1;
	}

      if (undobuf.other_insn
	  && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
	  && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
	{
	  basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
	  gcc_assert (bb);
	  remove_edge (split_block (bb, undobuf.other_insn));
	  emit_barrier_after_bb (bb);
	  *new_direct_jump_p = 1;
	}

      /* A noop might also need cleaning up of CFG, if it comes from the
	 simplification of a jump.  */
      if (JUMP_P (i3)
	  && GET_CODE (newpat) == SET
	  && SET_SRC (newpat) == pc_rtx
	  && SET_DEST (newpat) == pc_rtx)
	{
	  *new_direct_jump_p = 1;
	  update_cfg_for_uncondjump (i3);
	}

      if (undobuf.other_insn != NULL_RTX
	  && JUMP_P (undobuf.other_insn)
	  && GET_CODE (PATTERN (undobuf.other_insn)) == SET
	  && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
	  && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
	{
	  *new_direct_jump_p = 1;
	  update_cfg_for_uncondjump (undobuf.other_insn);
	}
      combine_successes++;
      undo_commit ();

      if (added_links_insn
	  && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
	  && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
	return added_links_insn;
      else
	return newi2pat ? i2 : i3;
    }
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;
    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
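
      /* Worked example (illustrative, assuming !BITS_BIG_ENDIAN): storing
	 the constant 3 into a 2-bit field at bit 4, i.e.
	   (set (zero_extract DEST 2 4) (const_int 3)),
	 has src == mask and becomes DEST = DEST | (3 << 4); storing 2
	 instead becomes DEST = (DEST & ~(3 << 4)) | (2 << 4).  */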
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
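
	  /* E.g. (illustrative): "r2 = r1 & 0x1000; if (r2 == 0) ..."
	     has exact_log2 (0x1000) == 12 >= 7, so the AND is replaced
	     by a one-bit extraction at bit 12 of r1, avoiding the insns
	     that would otherwise be needed to materialize 0x1000.  */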

	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
= XEXP (SET_SRC (x
), 0);
5036 /* We can't optimize if either mode is a partial integer
5037 mode as we don't know how many bits are significant
5039 if (!is_int_mode (GET_MODE (inner
), &inner_mode
)
5040 || GET_MODE_CLASS (GET_MODE (SET_SRC (x
))) == MODE_PARTIAL_INT
)
5044 len
= GET_MODE_PRECISION (inner_mode
);
5050 if (is_a
<scalar_int_mode
> (GET_MODE (XEXP (SET_SRC (x
), 0)),
5052 && CONST_INT_P (XEXP (SET_SRC (x
), 1))
5053 && CONST_INT_P (XEXP (SET_SRC (x
), 2)))
5055 inner
= XEXP (SET_SRC (x
), 0);
5056 len
= INTVAL (XEXP (SET_SRC (x
), 1));
5057 pos
= INTVAL (XEXP (SET_SRC (x
), 2));
5059 if (BITS_BIG_ENDIAN
)
5060 pos
= GET_MODE_PRECISION (inner_mode
) - len
- pos
;
5061 unsignedp
= (code
== ZERO_EXTRACT
);
5070 && pos
+ len
<= GET_MODE_PRECISION (GET_MODE (inner
))
5071 && is_a
<scalar_int_mode
> (GET_MODE (SET_SRC (x
)), &mode
))
5073 /* For unsigned, we have a choice of a shift followed by an
5074 AND or two shifts. Use two shifts for field sizes where the
5075 constant might be too large. We assume here that we can
5076 always at least get 8-bit constants in an AND insn, which is
5077 true for every current RISC. */
5079 if (unsignedp
&& len
<= 8)
5081 unsigned HOST_WIDE_INT mask
5082 = (HOST_WIDE_INT_1U
<< len
) - 1;
5086 (mode
, gen_lowpart (mode
, inner
),
5088 gen_int_mode (mask
, mode
)));
5090 split
= find_split_point (&SET_SRC (x
), insn
, true);
5091 if (split
&& split
!= &SET_SRC (x
))
5098 (unsignedp
? LSHIFTRT
: ASHIFTRT
, mode
,
5099 gen_rtx_ASHIFT (mode
,
5100 gen_lowpart (mode
, inner
),
5101 GEN_INT (GET_MODE_PRECISION (mode
)
5103 GEN_INT (GET_MODE_PRECISION (mode
) - len
)));
5105 split
= find_split_point (&SET_SRC (x
), insn
, true);
5106 if (split
&& split
!= &SET_SRC (x
))
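
      /* E.g. (illustrative): an unsigned 3-bit field at bit 2 with
	 len <= 8 becomes (and (lshiftrt X 2) 7); the signed variant
	 instead shifts the field up to the top with ASHIFT and back
	 down with ASHIFTRT so no large constant is needed.  */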
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;

    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;

    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}
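
      /* E.g. (illustrative): (minus A (mult B (const_int 5))) is retried
	 as (plus (mult B (const_int -5)) A), since 5 is not a power of
	 two and the PLUS form may match a multiply-add pattern.  */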

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
		      && (!HAVE_cc0
			  || ! (code == SET
				&& i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx, GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (tmp != x)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
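
/* E.g. (illustrative): (plus (const_int 4) (reg R)) is rewritten as
   (plus (reg R) (const_int 4)), the canonical order with the constant
   last.  */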

/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
);
5621 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5622 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5623 things. Check for cases where both arms are testing the same
5626 Don't do anything if all operands are very simple. */
5629 && ((!OBJECT_P (XEXP (x
, 0))
5630 && ! (GET_CODE (XEXP (x
, 0)) == SUBREG
5631 && OBJECT_P (SUBREG_REG (XEXP (x
, 0)))))
5632 || (!OBJECT_P (XEXP (x
, 1))
5633 && ! (GET_CODE (XEXP (x
, 1)) == SUBREG
5634 && OBJECT_P (SUBREG_REG (XEXP (x
, 1)))))))
5636 && (!OBJECT_P (XEXP (x
, 0))
5637 && ! (GET_CODE (XEXP (x
, 0)) == SUBREG
5638 && OBJECT_P (SUBREG_REG (XEXP (x
, 0)))))))
5640 rtx cond
, true_rtx
, false_rtx
;
5642 cond
= if_then_else_cond (x
, &true_rtx
, &false_rtx
);
5644 /* If everything is a comparison, what we have is highly unlikely
5645 to be simpler, so don't use it. */
5646 && ! (COMPARISON_P (x
)
5647 && (COMPARISON_P (true_rtx
) || COMPARISON_P (false_rtx
))))
5649 rtx cop1
= const0_rtx
;
5650 enum rtx_code cond_code
= simplify_comparison (NE
, &cond
, &cop1
);
5652 if (cond_code
== NE
&& COMPARISON_P (cond
))
5655 /* Simplify the alternative arms; this may collapse the true and
5656 false arms to store-flag values. Be careful to use copy_rtx
5657 here since true_rtx or false_rtx might share RTL with x as a
5658 result of the if_then_else_cond call above. */
5659 true_rtx
= subst (copy_rtx (true_rtx
), pc_rtx
, pc_rtx
, 0, 0, 0);
5660 false_rtx
= subst (copy_rtx (false_rtx
), pc_rtx
, pc_rtx
, 0, 0, 0);
5662 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5663 is unlikely to be simpler. */
5664 if (general_operand (true_rtx
, VOIDmode
)
5665 && general_operand (false_rtx
, VOIDmode
))
5667 enum rtx_code reversed
;
5669 /* Restarting if we generate a store-flag expression will cause
5670 us to loop. Just drop through in this case. */
5672 /* If the result values are STORE_FLAG_VALUE and zero, we can
5673 just make the comparison operation. */
5674 if (true_rtx
== const_true_rtx
&& false_rtx
== const0_rtx
)
5675 x
= simplify_gen_relational (cond_code
, mode
, VOIDmode
,
5677 else if (true_rtx
== const0_rtx
&& false_rtx
== const_true_rtx
5678 && ((reversed
= reversed_comparison_code_parts
5679 (cond_code
, cond
, cop1
, NULL
))
5681 x
= simplify_gen_relational (reversed
, mode
, VOIDmode
,
5684 /* Likewise, we can make the negate of a comparison operation
5685 if the result values are - STORE_FLAG_VALUE and zero. */
5686 else if (CONST_INT_P (true_rtx
)
5687 && INTVAL (true_rtx
) == - STORE_FLAG_VALUE
5688 && false_rtx
== const0_rtx
)
5689 x
= simplify_gen_unary (NEG
, mode
,
5690 simplify_gen_relational (cond_code
,
5694 else if (CONST_INT_P (false_rtx
)
5695 && INTVAL (false_rtx
) == - STORE_FLAG_VALUE
5696 && true_rtx
== const0_rtx
5697 && ((reversed
= reversed_comparison_code_parts
5698 (cond_code
, cond
, cop1
, NULL
))
5700 x
= simplify_gen_unary (NEG
, mode
,
5701 simplify_gen_relational (reversed
,
5706 return gen_rtx_IF_THEN_ELSE (mode
,
5707 simplify_gen_relational (cond_code
,
5712 true_rtx
, false_rtx
);
5714 code
= GET_CODE (x
);
5715 op0_mode
= VOIDmode
;
5720 /* First see if we can apply the inverse distributive law. */
5721 if (code
== PLUS
|| code
== MINUS
5722 || code
== AND
|| code
== IOR
|| code
== XOR
)
5724 x
= apply_distributive_law (x
);
5725 code
= GET_CODE (x
);
5726 op0_mode
= VOIDmode
;
5729 /* If CODE is an associative operation not otherwise handled, see if we
5730 can associate some operands. This can win if they are constants or
5731 if they are logically related (i.e. (a & b) & a). */
5732 if ((code
== PLUS
|| code
== MINUS
|| code
== MULT
|| code
== DIV
5733 || code
== AND
|| code
== IOR
|| code
== XOR
5734 || code
== SMAX
|| code
== SMIN
|| code
== UMAX
|| code
== UMIN
)
5735 && ((INTEGRAL_MODE_P (mode
) && code
!= DIV
)
5736 || (flag_associative_math
&& FLOAT_MODE_P (mode
))))
5738 if (GET_CODE (XEXP (x
, 0)) == code
)
5740 rtx other
= XEXP (XEXP (x
, 0), 0);
5741 rtx inner_op0
= XEXP (XEXP (x
, 0), 1);
5742 rtx inner_op1
= XEXP (x
, 1);
5745 /* Make sure we pass the constant operand if any as the second
5746 one if this is a commutative operation. */
5747 if (CONSTANT_P (inner_op0
) && COMMUTATIVE_ARITH_P (x
))
5748 std::swap (inner_op0
, inner_op1
);
5749 inner
= simplify_binary_operation (code
== MINUS
? PLUS
5750 : code
== DIV
? MULT
5752 mode
, inner_op0
, inner_op1
);
5754 /* For commutative operations, try the other pair if that one
5756 if (inner
== 0 && COMMUTATIVE_ARITH_P (x
))
5758 other
= XEXP (XEXP (x
, 0), 1);
5759 inner
= simplify_binary_operation (code
, mode
,
5760 XEXP (XEXP (x
, 0), 0),
5765 return simplify_gen_binary (code
, mode
, other
, inner
);
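
  /* Worked example (an illustration, not from the original sources):
     for X = (plus:SI (plus:SI (reg:SI 100) (const_int 3)) (const_int 4)),
     OTHER is (reg:SI 100), INNER_OP0/INNER_OP1 are the two constants
     (swapped so the constant comes second), INNER folds to (const_int 7),
     and the result is (plus:SI (reg:SI 100) (const_int 7)).  */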
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && (subreg_lowpart_offset (int_mode, int_op0_mode)
		== SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
		& GET_MODE_MASK (int_mode)) == 0)
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;

    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;

    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;

    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);

      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}
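
      /* Worked example (an illustration, not from the original sources):
	 for (plus:SI (and:SI (reg:SI 100) (const_int 1))
		      (and:SI (reg:SI 100) (const_int 2)))
	 the two nonzero_bits masks (1 and 2) are disjoint, so the PLUS is
	 rewritten as an IOR, which the recursive simplification can then
	 collapse to (and:SI (reg:SI 100) (const_int 3)).  */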
      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
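
      /* Illustration (an addition, not from the original sources): under
	 this rule (plus:SI (reg:SI 100) (reg:SI 100)) becomes
	 (ashift:SI (reg:SI 100) (const_int 1)).  */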
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
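
      /* Worked example (an illustration, not from the original sources):
	 with pow2 = 8,
	   (minus:SI (reg:SI 100) (and:SI (reg:SI 100) (const_int -8)))
	 keeps exactly the low three bits of the register and is rewritten
	 as (and:SI (reg:SI 100) (const_int 7)).  */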
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;

    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
      break;
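
      /* Illustration (an addition, not from the original sources): an
	 unsigned divide such as
	   (udiv:SI (lshiftrt:SI (reg:SI 100) (const_int 3)) (const_int 4))
	 has a shift as its first operand, so it is handed to
	 simplify_shift_const as a LSHIFTRT by 2, which can merge the two
	 logical right shifts into
	   (lshiftrt:SI (reg:SI 100) (const_int 5)).  */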
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */

	  if (in_cond)
	    ;

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */

	  if (in_cond)
	    ;

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }
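
	  /* Summary of the conversions above, as an illustration (not from
	     the original sources), writing X for OP0 and assuming OP1 == 0:

	       STORE_FLAG_VALUE == 1,  X known 0/1:   (ne X 0) => X
						      (eq X 0) => (xor X 1)
	       STORE_FLAG_VALUE == 1,  X known 0/-1:  (ne X 0) => (neg X)
						      (eq X 0) => (plus X 1)
	       STORE_FLAG_VALUE == -1, X known 0/-1:  (ne X 0) => X
						      (eq X 0) => (not X)
	       STORE_FLAG_VALUE == -1, X known 0/1:   (ne X 0) => (neg X)
						      (eq X 0) => (plus X -1)  */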
	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }

  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
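
  /* Illustration (an addition, not from the original sources): with
     integer registers A and B, (if_then_else (eq A B) B A) reduces to A,
     and (if_then_else (ne A B) A B) likewise reduces to A; either way the
     condition no longer matters.  The HONOR_NANS check blocks this for
     floating point, where equal operands are not interchangeable
     (consider signed zeros and NaNs).  */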
  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
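
  /* Illustration (an addition, not from the original sources):
     (if_then_else (ge (reg:SI 100) (reg:SI 101)) (reg:SI 100) (reg:SI 101))
     selects the larger of the two signed values and is rewritten as
     (smax:SI (reg:SI 100) (reg:SI 101)); the GEU/GTU and LEU/LTU cases
     produce UMAX and UMIN in the same way.  */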
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      machine_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}

      if (z)
	{
	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, m, temp,
				      simplify_gen_binary (MULT, m, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }

  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);
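
  /* Worked example (an illustration, not from the original sources):
     if A = (and:SI (reg:SI 100) (const_int 1)) is known to be 0 or 1,
     then (if_then_else (ne A 0) (const_int 8) (const_int 0)) becomes
     (ashift:SI A (const_int 3)), since i = exact_log2 (8) = 3: the
     shifted A is 8 exactly when A is nonzero.  */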
  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && INTEGRAL_MODE_P (GET_MODE (XEXP (cond, 0)))
      && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
	 == nonzero_bits (XEXP (cond, 0), GET_MODE (XEXP (cond, 0)))
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      machine_mode val_mode = GET_MODE (val);
      if (val_mode == mode)
	return val;
      else if (GET_MODE_PRECISION (val_mode) < GET_MODE_PRECISION (mode))
	return simplify_gen_unary (ZERO_EXTEND, mode, val, val_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }

  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
					 GET_MODE (SUBREG_REG (src)),
					 GET_MODE (src)))
#endif
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_PRECISION (GET_MODE (src))
	  < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }

  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }

  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead. Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
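
/* Illustration (an addition, not from the original sources), assuming
   32-bit SImode and 8-bit QImode:

     (sign_extend:SI (reg:QI 100))
       expands to (ashiftrt:SI (ashift:SI (subreg:SI (reg:QI 100) 0)
					  (const_int 24))
			       (const_int 24))

     (zero_extend:SI (reg:QI 100))
       expands (after the shift pair is simplified) to
       (and:SI (subreg:SI (reg:QI 100) 0) (const_int 255))

   make_compound_operation performs the reverse rewriting.  */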
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* FALLTHRU */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      machine_mode mode = GET_MODE (x);
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when GET_MODE (x) is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }

  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */

  modewidth = GET_MODE_PRECISION (GET_MODE (x));
  if (modewidth >= pos + len)
    {
      machine_mode mode = GET_MODE (x);
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
				  simplify_shift_const (NULL_RTX, LSHIFTRT,
							GET_MODE (x),
							XEXP (x, 0), pos),
				  (HOST_WIDE_INT_1U << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
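
/* Illustration (an addition, not from the original sources), for a
   constant-position store of SRC into an 8-bit field at bit 8 of
   pseudo 100:

     (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 8)) SRC)

   becomes, with MASK = 255 and POS = 8, roughly

     (set (reg:SI 100)
	  (ior:SI (and:SI (not:SI (ashift:SI (const_int 255) (const_int 8)))
			  (reg:SI 100))
		  (ashift:SI (and:SI SRC (const_int 255)) (const_int 8))))

   i.e. the field is cleared and the masked source OR'ed back in.  */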
static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos)
	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
			       - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && (INTVAL (XEXP (pos, 1))
			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		{
		  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
		  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					     gen_int_mode (prec - len,
							   GET_MODE (pos)),
					     pos);
		}
	    }
	}

      /* A SUBREG between two modes that occupy the same numbers of words
	 can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);

      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */
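
/* Illustration (an addition, not from the original sources): a call such as

     make_extraction (QImode, reg, 8, NULL_RTX, 8, 1, 0, 0)

   asks for the unsigned 8-bit field at bits 8..15 of REG; depending on
   what the checks below allow, the result may be a ZERO_EXTRACT, a
   narrower MEM, or a shift/AND combination.  */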
7404 make_extraction (machine_mode mode
, rtx inner
, HOST_WIDE_INT pos
,
7405 rtx pos_rtx
, unsigned HOST_WIDE_INT len
, int unsignedp
,
7406 int in_dest
, int in_compare
)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  machine_mode wanted_inner_mode;
  machine_mode wanted_inner_reg_mode = word_mode;
  machine_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;
  if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE)
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);
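  /* Illustrative sketch, not part of the original source: extracting the
     low 8 bits of (ashift:SI X (const_int 2)) recurses to extract the
     low 8 - 2 == 6 bits of X and then re-applies the shift:

       (ashift:SI (extraction of the low 6 bits of X) (const_int 2))  */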
  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode. For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  HOST_WIDE_INT offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
	  else
	    offset = pos / BITS_PER_UNIT;

	  new_rtx = adjust_address_nv (inner, tmode, offset);
	}
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  unsigned int offset
		    = subreg_offset_from_lsb (tmode, inner_mode, pos);

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, offset))
		    return NULL_RTX;

		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
		}
	      else
		new_rtx = gen_lowpart (tmode, inner);
	    }
	  else
	    new_rtx = inner;
	}
      else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new_rtx) ? new_rtx
		: (GET_CODE (new_rtx) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
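      /* Illustrative sketch, not part of the original source: on a
	 little-endian target, storing to the low byte of a pseudo yields

	   (strict_low_part (subreg:QI (reg:SI N) 0))

	 whereas a MEM destination is returned as the narrowed MEM
	 itself.  */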
      if (mode == tmode)
	return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of sign
	 and zero extension, which are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (tmode)
	      && ((nonzero_bits (new_rtx, tmode)
		   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new_rtx));
    }
  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* If INNER is a MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
	  || (pos_rtx != 0 && len != 1)))
    return 0;
  enum extraction_pattern pattern = (in_dest ? EP_insv
				     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  if (get_best_reg_extraction_insn (&insn, pattern,
				    GET_MODE_BITSIZE (inner_mode), mode))
    {
      wanted_inner_reg_mode = insn.struct_mode;
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
    extraction_mode = mode;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;
  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
      int width = (MEM_P (inner)
		   ? GET_MODE_BITSIZE (is_mode)
		   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
			   pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }
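  /* Illustrative sketch, not part of the original source: for a variable
     position within a 32-bit object and len == 8, the code above builds

       (minus:SI (const_int 24) POS_RTX)

     since width - len == 24.  */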
  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && paradoxical_subreg_p (is_mode, inner_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }
  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
	 the register unless TRULY_NOOP_TRUNCATION.  */
      if (in_dest
	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
					     wanted_inner_mode))
	return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
    }
  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.  */
  if (pos_rtx != 0
      && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
				     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of sign
	 and zero extension, which are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
					  GET_MODE (pos_rtx));

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
	    temp = temp1;
	}
      pos_rtx = temp;
    }
  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			     extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
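/* Illustrative sketch, not part of the original source: an unsigned
   8-bit field at bit 3 of (reg:SI N) comes out as

     (zero_extract:SI (reg:SI N) (const_int 8) (const_int 3))

   possibly wrapped by gen_lowpart when MODE differs from
   EXTRACTION_MODE.  */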
/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */

static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}
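/* Illustrative sketch, not part of the original source: with COUNT == 3,

     (plus:SI (ashift:SI X (const_int 3)) (const_int 8))

   commutes to (plus:SI X (const_int 1)); the three low zero bits of 8
   make the constant safe to shift right along with X.  */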
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (machine_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }
  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;
    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	}
      maybe_swap_commutative_operands (x);
      return x;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}
    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation
				     (XEXP (XEXP (x, 0), 0), next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode) < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* original AND constant with all the known zero bits set */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0,
					     GET_MODE_PRECISION (sub_mode),
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;
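      /* Illustrative sketch, not part of the original source: since 255 is
	 one less than a power of two,

	   (and:SI (lshiftrt:SI X (const_int 3)) (const_int 255))

	 becomes (zero_extract:SI X (const_int 8) (const_int 3)).  */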
    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */
    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;
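      /* Illustrative sketch, not part of the original source: in SImode,

	   (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))

	 is recognized as a signed extraction of the low 32 - 24 == 8 bits
	 of X, i.e. the equivalent of sign-extending its low byte.  */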
    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	    && CONST_INT_P (XEXP (inner, 1))
	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (inner_mode)
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
		   for non-equality comparisons against 0 is not equivalent
		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode) - 1)))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded the
	       compound, use gen_lowpart to convert to the desired mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */

rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
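/* Illustrative sketch, not part of the original source: for
   M == 0x70 (binary 0111 0000), ctz_hwi gives pos == 4, and since
   (M >> 4) + 1 == 8 is a power of two, *PLEN is set to 3.  A mask
   such as 0x50 fails the power-of-two-minus-one test and yields -1.  */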
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT fuller_mask, nonzero;
  rtx op0, op1, temp;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && ((GET_MODE_SIZE (GET_MODE (x))
	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	  || (0 == (mask
		    & GET_MODE_MASK (GET_MODE (x))
		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  /* The arithmetic simplifications here only work for scalar integer
     modes.  */
  if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
    return gen_lowpart_or_truncate (mode, x);
  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
		  == mask))
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (GET_MODE (x)) != mask
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1))
		  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
				       gen_int_mode (cval, GET_MODE (x)));
	      if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
		  < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;
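      /* Illustrative sketch, not part of the original source: with
	 MASK == 0x0f, (and:SI X (const_int 255)) first becomes
	 (and:SI X (const_int 15)), and since that AND now matches MASK
	 exactly it is dropped altogether, leaving just X.  */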
    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;
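      /* Illustrative sketch, not part of the original source: with an
	 alignment mask MASK == -8 and FOO known to have its three
	 low-order bits zero, (plus FOO (const_int 7)) masks the constant
	 7 down to 0 and the PLUS disappears entirely.  */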
    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
	{
	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
				  GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, GET_MODE (x),
				  XEXP (x, 1), GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;
    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (GET_MODE (x)))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	x = simplify_gen_binary (code, op_mode, op0, op1);
      break;
    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), op_mode,
						    mask, next_select));

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
      break;
    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
	    op_mode = GET_MODE (x);

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (GET_MODE (x)))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (GET_MODE (x)))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
					  - exact_log2 (mask + 1)));

      goto shiftrt;
    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (GET_MODE (x), mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (GET_MODE (x));
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (GET_MODE (x));
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
		   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				 XEXP (x, 0), XEXP (x, 1));
    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    GET_MODE (x),
					    gen_int_mode (mask, GET_MODE (x)),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, GET_MODE (x),
				     force_to_mode (XEXP (x, 0), GET_MODE (x),
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;
    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (GET_MODE (x)))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (XOR, GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_unary (code, op_mode, op0, op_mode);
      break;
    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;
    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (GET_MODE (x),
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (GET_MODE (x),
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }
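  /* Illustrative sketch, not part of the original source: for
     x == (ne:SI (reg:SI N) (const_int 0)) the branch above returns
     (reg:SI N) with *PTRUE == const_true_rtx and
     *PFALSE == const0_rtx.  */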
  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
	  && (REG_P (op0) || REG_P (op1)))
	{
	  /* Try to enable a simplification by undoing work done by
	     if_then_else_cond if it converted a REG into something more
	     complex.  */
	  if (REG_P (op0))
	    {
	      cond0 = 0;
	      true0 = false0 = op0;
	    }
	  else
	    {
	      cond1 = 0;
	      true1 = false1 = op1;
	    }
	}

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	    }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}
      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
	      return cond0;
	    }
	}

      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }
9186 else if (code
== IF_THEN_ELSE
)
9188 /* If we have IF_THEN_ELSE already, extract the condition and
9189 canonicalize it if it is NE or EQ. */
9190 cond0
= XEXP (x
, 0);
9191 *ptrue
= XEXP (x
, 1), *pfalse
= XEXP (x
, 2);
9192 if (GET_CODE (cond0
) == NE
&& XEXP (cond0
, 1) == const0_rtx
)
9193 return XEXP (cond0
, 0);
9194 else if (GET_CODE (cond0
) == EQ
&& XEXP (cond0
, 1) == const0_rtx
)
9196 *ptrue
= XEXP (x
, 2), *pfalse
= XEXP (x
, 1);
9197 return XEXP (cond0
, 0);
9203 /* If X is a SUBREG, we can narrow both the true and false values
9204 if the inner expression, if there is a condition. */
9205 else if (code
== SUBREG
9206 && 0 != (cond0
= if_then_else_cond (SUBREG_REG (x
),
9209 true0
= simplify_gen_subreg (mode
, true0
,
9210 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
9211 false0
= simplify_gen_subreg (mode
, false0
,
9212 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
9213 if (true0
&& false0
)
  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (is_a <scalar_int_mode> (mode, &int_mode)
	       && (num_sign_bit_copies (x, int_mode)
		   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
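/* Illustrative example: if nonzero_bits shows that X can only be 0 or 8
   (a single bit), the leg above reports *ptrue = 8 and *pfalse = 0 and
   returns X itself as the condition being tested.  */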
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;
  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }
  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
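/* Illustrative example: if COND is GE, REG >= VAL is known, so
   (smax REG VAL) reduces to REG and (smin REG VAL) reduces to VAL; the
   reversal above maps GE to LT for SMAX so the switch picks the correct
   operand in the signed cases.  */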
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }
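/* Illustrative example: the QImode value 0xff is represented as
   (const_int -1), which is already the correct value after a sign
   extension to a wider mode; after a zero extension the wider value
   must become 255, which is why the ZERO_EXTEND case above has to
   re-simplify while no SIGN_EXTEND case is needed.  */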
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
	x = adjust_address_nv (x, GET_MODE (y),
			       -subreg_lowpart_offset (GET_MODE (x),
						       GET_MODE (y)));
      else
	x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other case, we'll import a register
     into this insn that might already have been dead.  So fail if none
     of the above cases are true.  */

  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }
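  /* Illustrative example (assuming QImode): (not (ashift 1 3)) will have
     been canonicalized to (rotate -2 3); rotating ...11111110 left by 3
     gives 11110111, a mask with a single zero at bit 3.  ANDing DEST with
     it clears exactly that bit, so the whole SET becomes a one-bit
     ZERO_EXTRACT at position 3 assigned zero.  */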
  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }
  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }
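  /* Illustrative example: for (set (zero_extract X 4 POS) (and Y C)),
     ze_mask is 0xf.  If C is 0xff the AND is redundant (complete overlap)
     and is dropped; if C is 0x3c only 0xc survives inside the field, so
     the AND is narrowed to (and Y 0xc) (partial overlap).  */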
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;
  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;
  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
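/* Illustrative example (assuming SImode): with C1 = 0xffff00ff, ~C1
   masked to the mode is 0x0000ff00, giving pos = 8 and len = 8.
   Provided OTHER has no nonzero bits outside bits 8..15, the statement
   becomes (set (zero_extract DEST 8 8) (lshiftrt OTHER 8)) after the
   shift and force_to_mode above.  */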
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
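/* Illustrative example: (ior (and A C) (and B C)) becomes
   (and (ior A B) C), and the XOR/IOR exception above turns
   (xor (ior A C) (ior B C)) into (and (xor A B) (not C)); both are
   easily checked case by case on each bit of C (C = 1 forces both
   sides of the XOR form to 0, C = 0 leaves A ^ B).  */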
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
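/* Illustrative check of the special case above: when A is 1,
   (xor (ior A B) (ior A C)) is 1 ^ 1 = 0 and (and (xor B C) (not A))
   is 0; when A is 0 both sides reduce to (xor B C), so rewriting the
   NOT operand and switching OUTER_CODE to IOR is value-preserving.  */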
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (machine_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
	(mode,
	 apply_distributive_law
	   (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
				 simplify_and_const_int (NULL_RTX,
							 GET_MODE (varop),
							 XEXP (varop, 0),
							 constop),
				 simplify_and_const_int (NULL_RTX,
							 GET_MODE (varop),
							 XEXP (varop, 1),
							 constop))));

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
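/* Illustrative example of the NEG rule above: if X is known to be 0 or 1,
   (and (neg X) 8) is 8 when X is 1 (since (neg 1) is all ones) and 0 when
   X is 0, which is exactly (ashift X 3).  */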
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
			      const_rtx known_x ATTRIBUTE_UNUSED,
			      machine_mode known_mode ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
	 of mode, record_value_for_reg invoked nonzero_bits on the register
	 with nonzero_bits_mode (because last_set_mode is necessarily integral
	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
	 are all valid, hence in mode too since nonzero_bits_mode is defined
	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
	tem = sign_extend_short_imm (tem, GET_MODE (x),
				     GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));

      *nonzero &= mask;
    }

  return NULL;
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
				     const_rtx known_x ATTRIBUTE_UNUSED,
				     machine_mode known_mode
				     ATTRIBUTE_UNUSED,
				     unsigned int known_ret ATTRIBUTE_UNUSED,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
	  ? (is_a <scalar_int_mode> (mode, &int_mode)
	     && HWI_COMPUTABLE_MODE_P (int_mode)
	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
			       - floor_log2 (nonzero_bits (x, int_mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
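/* Illustrative example (assuming QImode, unsigned): if nonzero_bits (x)
   is 0x1f, the result is 8 - 1 - floor_log2 (0x1f) = 3 spare bits; two
   such quantities sum to at most 62, so the addition cannot wrap.  */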
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only
   NEG, PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if
   the proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b */
	  ;
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b */
	  *pcomp_p = 1;
	break;

      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  *pop0 = op0;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  return 1;
}
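/* Illustrative example of the identity table above, with a = 1100 and
   b = 1010 (binary): (a & b) ^ b = 1000 ^ 1010 = 0010, and
   (~a) & b = 0011 & 1010 = 0010, confirming (a & b) ^ b == (~a) & b,
   the case that sets *PCOMP_P.  */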
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      machine_mode orig_mode, machine_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  if (orig_mode == mode)
    return mode;
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
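/* Illustrative example: widening (lshiftrt:QI OP 2) to SImode is safe
   either when the upper 24 bits of OP are known zero, or when an outer
   (and ... 0xf) follows: low_bitmask_len gives care_bits = 4 and
   8 - 4 >= 2, so the bits brought in from the left are masked off
   anyway.  */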
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;
  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
	  code = ROTATE;
	  count = bitsize - count;
	}

      shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
					 mode, outer_op, outer_const);
      machine_mode shift_unit_mode = GET_MODE_INNER (shift_mode);

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_unit_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}
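      /* Illustrative example (assuming SImode): a combined count of 40
	 becomes 31 for ASHIFTRT (the result is all copies of the sign
	 bit), 40 % 32 = 8 for a rotate, and a plain zero for the other
	 shifts -- kept as VAROP = const0_rtx so any outer operation can
	 still be applied.  */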
      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      if (shift_mode == shift_unit_mode)
	{
	  /* An arithmetic right shift of a quantity known to be -1 or 0
	     is a no-op.  */
	  if (code == ASHIFTRT
	      && (num_sign_bit_copies (varop, shift_unit_mode)
		  == GET_MODE_PRECISION (shift_unit_mode)))
	    {
	      count = 0;
	      break;
	    }

	  /* If we are doing an arithmetic right shift and discarding all but
	     the sign bit copies, this is equivalent to doing a shift by the
	     bitsize minus one.  Convert it into that shift because it will
	     often allow other simplifications.  */

	  if (code == ASHIFTRT
	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;

	  /* We simplify the tests below and elsewhere by converting
	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	     `make_compound_operation' will convert it to an ASHIFTRT for
	     those machines (such as VAX) that don't have an LSHIFTRT.  */
	  if (code == ASHIFTRT
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	      && val_signbit_known_clear_p (shift_unit_mode,
					    nonzero_bits (varop,
							  shift_unit_mode)))
	    code = LSHIFTRT;

	  if (((code == LSHIFTRT
		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
	       || (code == ASHIFT
		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		   && !((nonzero_bits (varop, shift_unit_mode) << count)
			& GET_MODE_MASK (shift_unit_mode))))
	      && !side_effects_p (varop))
	    varop = const0_rtx;
	}
      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;

	case MEM:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (int_mode_for_size (GET_MODE_BITSIZE (mode) - count, 1)
		  .exists (&tmode)))
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;
	case SUBREG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (GET_MODE (varop))
	      && (unsigned int) ((GET_MODE_SIZE (inner_mode)
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words
	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (mode))
		mode = inner_mode;
	      continue;
	    }
	  break;
	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;
	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
	      && (GET_MODE_UNIT_BITSIZE (result_mode)
		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* fall through */
	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
	      && HWI_COMPUTABLE_MODE_P (result_mode)
	      && HWI_COMPUTABLE_MODE_P (mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (result_mode)
			       - GET_MODE_PRECISION (GET_MODE (varop))))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (mode)
			 & ~((HOST_WIDE_INT_1U << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
						varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && GET_MODE (varop) == shift_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (GET_MODE (varop) != result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
		  || (GET_MODE (varop) != result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
				       result_mode);
	      mask_rtx
		= simplify_const_binary_operation (code, result_mode, mask_rtx,
						   GEN_INT (count));

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used.  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
		 sure the result will be masked.  See PR70222.  */
	      if (code == LSHIFTRT
		  && mode != result_mode
		  && !merge_outer_ops (&outer_op, &outer_const, AND,
				       GET_MODE_MASK (result_mode)
				       >> orig_count, result_mode,
				       &complement_p))
		break;
	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
		 up outer sign extension (often left and right shift) is
		 hardly more efficient than the original.  See PR70429.  */
	      if (code == ASHIFTRT && mode != result_mode)
		break;

	      rtx new_rtx = simplify_const_binary_operation (code, mode,
							     XEXP (varop, 0),
							     GEN_INT (count));
	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;
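	  /* Illustrative example of the nested-shift merge above (assuming
	     SImode): for (lshiftrt (lshiftrt X 2) 3) the counts add to give
	     (lshiftrt X 5), and the recorded outer AND mask is the nonzero
	     bits of the inner shift moved the same way the outer shift
	     moves them, 0x3fffffff >> 3 = 0x07ffffff.  */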
	case NOT:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;
	case IOR:
	case AND:
	case XOR:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_mode with shift_mode
		 wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && result_mode != shift_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode))
	      && (new_rtx = simplify_const_binary_operation
		  (code, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && result_mode != shift_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode)))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;
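	  /* Illustrative check of the (le X 0) rule above (assuming SImode,
	     STORE_FLAG_VALUE == 1): the sign bit of (ior (plus X -1) X) is
	     set exactly when X <= 0 (X = 0 makes X-1 negative; X < 0 is
	     negative itself; X > 0 leaves both operands non-negative), so
	     the logical shift by 31 yields precisely (le X 0).  */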
	case EQ:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
	     says that the sign bit can be tested, FOO has mode MODE, C is
	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
	     that may be nonzero.  */
	  if (code == LSHIFTRT
	      && XEXP (varop, 1) == const0_rtx
	      && GET_MODE (XEXP (varop, 0)) == result_mode
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && HWI_COMPUTABLE_MODE_P (result_mode)
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;
	case NEG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;
	case PLUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
				  &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), result_mode)
		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (result_mode)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    >> count)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    & nonzero_bits (XEXP (varop, 1),
					    result_mode)))
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }

	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;
	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }
	  break;
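	  /* Illustrative check of the (gt X 0) rule above (assuming SImode,
	     STORE_FLAG_VALUE == 1): (ashiftrt X 31) is 0 or -1, so the MINUS
	     computes -X or -1-X; its sign bit is set exactly when X > 0, and
	     the logical shift by 31 therefore computes (gt X 0).  */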
	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);

	      varop_inner
		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
				    XEXP (varop_inner, 0),
				    GEN_INT (count
					     + INTVAL (XEXP (varop_inner,
							     1))));
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
				     outer_op, outer_const);

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    x = simplify_and_const_int (NULL_RTX, shift_mode, x,
				GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
      else
	x = simplify_gen_binary (outer_op, result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
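  /* (The CLOBBER in question is the (clobber (const_int 0)) marker that
     combine substitutes for subexpressions it could not handle; see
     gen_lowpart_for_combine below.  A PARALLEL containing it can never
     match.)  */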
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets; these will be killed by a followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
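  /* As an illustration: on some targets an arithmetic pattern only matches
     together with a clobber of a flags register, so recog returns a
     nonzero NUM_CLOBBERS_TO_ADD; the clobbers are added here and are only
     acceptable if the clobbered registers are dead at this insn.  */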
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */
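/* For example, assuming !BITS_BIG_ENDIAN: an eight-bit field at bit 8,
   (zero_extract:SI X (const_int 8) (const_int 8)), becomes
   (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255)).  */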
static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */
static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */
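/* As an illustration: asking for the QImode lowpart of (reg:SI 100)
   normally yields (subreg:QI (reg:SI 100) 0) on a little-endian target,
   while an input this function cannot handle yields the
   (clobber (const_int 0)) failure marker described above.  */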
static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);

      if (WORDS_BIG_ENDIAN)
	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
	 unchanged.  */
      if (BYTES_BIG_ENDIAN)
	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */
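/* For example: if nonzero_bits shows that OP0 can only have bit 2 set,
   (eq OP0 (const_int 4)) becomes (ne OP0 (const_int 0)) and
   (ltu OP0 (const_int 4)) becomes (eq OP0 (const_int 0)).  */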
static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  unsigned int mode_width = GET_MODE_PRECISION (mode);
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && mode_width - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (mode))
      && (nonzero_bits (op0, mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && num_sign_bit_copies (op0, mode) == mode_width)
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & (HOST_WIDE_INT_1U << (mode_width - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & (HOST_WIDE_INT_1U << (mode_width - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == HOST_WIDE_INT_1U << (mode_width - 1))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0.  */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == HOST_WIDE_INT_1U << (mode_width - 1))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0.  */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
	       == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */
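/* Two simple illustrations of what the loop below does:
   (eq (not X) (const_int -1)) becomes (eq X (const_int 0)), and
   (eq (neg X) (const_int 5)) becomes (eq X (const_int -5)).  */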
static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode;
  machine_mode tmode;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
		  <= HOST_BITS_PER_WIDE_INT)
	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					      GET_MODE (SUBREG_REG
							(inner_op0)))))
	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					      GET_MODE (SUBREG_REG
							(inner_op1))))))
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode, GET_MODE (op0))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}

      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }
  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode mode = GET_MODE (op0);
      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (mode) != MODE_INT
	  && ! (mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
	op0 = force_to_mode (op0, mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (mode) - 1),
			     0);

      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* fall through */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;
	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && ((0 == (nonzero_bits (a, inner_mode)
			      & ~GET_MODE_MASK (mode))
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }

	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* FALLTHROUGH */
	case ZERO_EXTEND:
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;

	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;
	case EQ:  case NE:
	case UNEQ:  case LTGT:
	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
	case UNORDERED:  case ORDERED:
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */

	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;
	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }

	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	       (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	       -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND does no longer mask the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			     << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;
	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
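	  /* For instance, (compare (ashift FOO 2) (const_int 12)) can
	     become (compare FOO (const_int 3)) when the high-order bits
	     of FOO are known to be zero, since 12 has its low two bits
	     clear.  */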
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* FALLTHROUGH */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
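	  /* For instance, (compare (lshiftrt FOO 2) (const_int 3)) can
	     become an unsigned comparison of FOO with (const_int 12) when
	     the low two bits of FOO are known zero, or with (const_int 15)
	     for > and <= by setting those low bits.  */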
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			     << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

	We can never remove a SUBREG for a non-equality comparison because
	the sign bit is in a different place in the underlying object.  */
  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (inner_mode, op1);
	    }
	}
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
	{
	  tem = gen_lowpart (inner_mode, op1);

	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }
  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode, mode)
      {
	if (!HWI_COMPUTABLE_MODE_P (tmode))
	  break;
	if (have_insn_for (COMPARE, tmode))
	  {
	    int zero_extended;

	    /* If this is a test for negative, we can make an explicit
	       test of the sign bit.  Test this first so we can use
	       a paradoxical subreg to extend OP0.  */

	    if (op1 == const0_rtx && (code == LT || code == GE)
		&& HWI_COMPUTABLE_MODE_P (mode))
	      {
		unsigned HOST_WIDE_INT sign
		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode, op0),
					   gen_int_mode (sign, tmode));
		code = (code == LT) ? NE : EQ;
		break;
	      }

	    /* If the only nonzero bits in OP0 and OP1 are those in the
	       narrower mode and this is an equality or unsigned comparison,
	       we can use the wider mode.  Similarly for sign-extended
	       values, in which case it is true for all comparisons.  */
	    zero_extended = ((code == EQ || code == NE
			      || code == GEU || code == GTU
			      || code == LEU || code == LTU)
			     && (nonzero_bits (op0, tmode)
				 & ~GET_MODE_MASK (mode)) == 0
			     && ((CONST_INT_P (op1)
				  || (nonzero_bits (op1, tmode)
				      & ~GET_MODE_MASK (mode)) == 0)));

	    if (zero_extended
		|| ((num_sign_bit_copies (op0, tmode)
		     > (unsigned int) (GET_MODE_PRECISION (tmode)
				       - GET_MODE_PRECISION (mode)))
		    && (num_sign_bit_copies (op1, tmode)
			> (unsigned int) (GET_MODE_PRECISION (tmode)
					  - GET_MODE_PRECISION (mode)))))
	      {
		/* If OP0 is an AND and we don't have an AND in MODE either,
		   make a new AND in the proper mode.  */
		if (GET_CODE (op0) == AND
		    && !have_insn_for (AND, mode))
		  op0 = simplify_gen_binary (AND, tmode,
					     gen_lowpart (tmode,
							  XEXP (op0, 0)),
					     gen_lowpart (tmode,
							  XEXP (op0, 1)));

		if (zero_extended)
		  {
		    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
					      op1, mode);
		  }
		else
		  {
		    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op0, mode);
		    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
					      op1, mode);
		  }
		break;
	      }
	  }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c.  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpressions we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */
, rtx_insn
*insn
, rtx value
)
12948 unsigned int regno
= REGNO (reg
);
12949 unsigned int endregno
= END_REGNO (reg
);
12951 reg_stat_type
*rsp
;
12953 /* If VALUE contains REG and we have a previous value for REG, substitute
12954 the previous value. */
12955 if (value
&& insn
&& reg_overlap_mentioned_p (reg
, value
))
12959 /* Set things up so get_last_value is allowed to see anything set up to
12961 subst_low_luid
= DF_INSN_LUID (insn
);
12962 tem
= get_last_value (reg
);
12964 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12965 it isn't going to be useful and will take a lot of time to process,
12966 so just use the CLOBBER. */
12970 if (ARITHMETIC_P (tem
)
12971 && GET_CODE (XEXP (tem
, 0)) == CLOBBER
12972 && GET_CODE (XEXP (tem
, 1)) == CLOBBER
)
12973 tem
= XEXP (tem
, 0);
12974 else if (count_occurrences (value
, reg
, 1) >= 2)
12976 /* If there are two or more occurrences of REG in VALUE,
12977 prevent the value from growing too much. */
12978 if (count_rtxs (tem
) > MAX_LAST_VALUE_RTL
)
12979 tem
= gen_rtx_CLOBBER (GET_MODE (tem
), const0_rtx
);
12982 value
= replace_rtx (copy_rtx (value
), reg
, tem
);
12986 /* For each register modified, show we don't know its value, that
12987 we don't know about its bitwise content, that its value has been
12988 updated, and that we don't know the location of the death of the
12990 for (i
= regno
; i
< endregno
; i
++)
12992 rsp
= ®_stat
[i
];
12995 rsp
->last_set
= insn
;
12997 rsp
->last_set_value
= 0;
12998 rsp
->last_set_mode
= VOIDmode
;
12999 rsp
->last_set_nonzero_bits
= 0;
13000 rsp
->last_set_sign_bit_copies
= 0;
13001 rsp
->last_death
= 0;
13002 rsp
->truncated_to_mode
= VOIDmode
;
13005 /* Mark registers that are being referenced in this value. */
13007 update_table_tick (value
);
13009 /* Now update the status of each register being set.
13010 If someone is using this register in this block, set this register
13011 to invalid since we will get confused between the two lives in this
13012 basic block. This makes using this register always invalid. In cse, we
13013 scan the table to invalidate all entries using this register, but this
13014 is too much work for us. */
13016 for (i
= regno
; i
< endregno
; i
++)
13018 rsp
= ®_stat
[i
];
13019 rsp
->last_set_label
= label_tick
;
13021 || (value
&& rsp
->last_set_table_tick
>= label_tick_ebb_start
))
13022 rsp
->last_set_invalid
= 1;
13024 rsp
->last_set_invalid
= 0;
13027 /* The value being assigned might refer to X (like in "x++;"). In that
13028 case, we must replace it with (clobber (const_int 0)) to prevent
13030 rsp
= ®_stat
[regno
];
13031 if (value
&& !get_last_value_validate (&value
, insn
, label_tick
, 0))
13033 value
= copy_rtx (value
);
13034 if (!get_last_value_validate (&value
, insn
, label_tick
, 1))
13038 /* For the main register being modified, update the value, the mode, the
13039 nonzero bits, and the number of sign bit copies. */
13041 rsp
->last_set_value
= value
;
13045 machine_mode mode
= GET_MODE (reg
);
13046 subst_low_luid
= DF_INSN_LUID (insn
);
13047 rsp
->last_set_mode
= mode
;
13048 if (GET_MODE_CLASS (mode
) == MODE_INT
13049 && HWI_COMPUTABLE_MODE_P (mode
))
13050 mode
= nonzero_bits_mode
;
13051 rsp
->last_set_nonzero_bits
= nonzero_bits (value
, mode
);
13052 rsp
->last_set_sign_bit_copies
13053 = num_sign_bit_copies (value
, GET_MODE (reg
));
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
        record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
         show that we don't know the value.  We can handle SUBREG in
         some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
        record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
               && GET_CODE (SET_DEST (setter)) == SUBREG
               && SUBREG_REG (SET_DEST (setter)) == dest
               && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
               && subreg_lowpart_p (SET_DEST (setter)))
        record_value_for_reg (dest, record_dead_insn,
                              gen_lowpart (GET_MODE (dest),
                                           SET_SRC (setter)));
      else
        record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
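
/* As an example of the lowpart-SUBREG case above (register numbers
   invented), on a 64-bit target a store such as

     (set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   reaches us with DEST already stripped to (reg:DI 100); rather than
   discarding all knowledge of register 100, we record the lowpart of
   the source widened to DImode -- combine's gen_lowpart may build a
   paradoxical SUBREG for this -- as its value.  */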
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
          && REG_P (XEXP (link, 0)))
        {
          unsigned int regno = REGNO (XEXP (link, 0));
          unsigned int endregno = END_REGNO (XEXP (link, 0));

          for (i = regno; i < endregno; i++)
            {
              reg_stat_type *rsp;

              rsp = &reg_stat[i];
              rsp->last_death = insn;
            }
        }
      else if (REG_NOTE_KIND (link) == REG_INC)
        record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
        {
          reg_stat_type *rsp;

          rsp = &reg_stat[i];
          rsp->last_set_invalid = 1;
          rsp->last_set = insn;
          rsp->last_set_value = 0;
          rsp->last_set_mode = VOIDmode;
          rsp->last_set_nonzero_bits = 0;
          rsp->last_set_sign_bit_copies = 0;
          rsp->last_death = 0;
          rsp->truncated_to_mode = VOIDmode;
        }

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
         the return value register is set at this LUID.  We could
         still replace a register with the return value from the
         wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
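
/* For example, after this runs on a CALL_INSN, every register in
   regs_invalidated_by_call has last_set_value cleared and
   last_set_invalid set, so no value recorded before the call can be
   substituted into an insn that follows it.  */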
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
          || REGNO (SET_DEST (set)) != regno
          || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
        {
          links = links->next;
          continue;
        }

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
        {
          if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
            rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
        }

      if (REG_P (SET_SRC (set)))
        {
          regno = REGNO (SET_SRC (set));
          links = LOG_LINKS (insn);
        }
      else
        break;
    }
}
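
/* A hypothetical example (numbers invented): on a target that runs
   HImode arithmetic in SImode registers, a use of

     (subreg/s/u:HI (reg:SI 100) 0)

   promises that the high bits of (reg:SI 100) are zero, so if the
   LOG_LINKS chain leads back to the insn that last set register 100 we
   can narrow its recorded last_set_nonzero_bits to
   GET_MODE_MASK (HImode).  */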
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
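
/* For instance, if register 100 was recorded as truncated to QImode
   earlier in this EBB, reg_truncated_to_mode returns true when asked
   about QImode or any wider MODE, since a value that already behaves
   as if truncated to QImode certainly does so for the wider mode.  */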
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
   to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
        return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || (GET_MODE_SIZE (truncated_mode)
          < GET_MODE_SIZE (rsp->truncated_to_mode)))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
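
/* For example, on a target where truncation from SImode to QImode is
   not a no-op, seeing the use (subreg:QI (reg:SI 100) 0) records that
   register 100 is accessed in QImode; reg_truncated_to_mode can then
   let a later (truncate:QI (reg:SI 100)) be expressed as a lowpart
   SUBREG.  */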
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
        {
          reg_stat_type *rsp = &reg_stat[j];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                         (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                          regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpressions we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
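
/* To illustrate the REPLACE behavior (a made-up value): if the
   recorded value of some register was

     (ashiftrt:SI (reg:SI 90) (const_int 31))

   and register 90 has since been set again, validating with REPLACE
   nonzero turns the reference into

     (ashiftrt:SI (clobber (const_int 0)) (const_int 31))

   which still lets num_sign_bit_copies see that the result is -1 or 0
   even though the operand itself is no longer known.  */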
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (GET_MODE_PRECISION (rsp->last_set_mode)
      < GET_MODE_PRECISION (GET_MODE (x)))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
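
/* For instance (numbers invented), after

     (insn 10 9 11 2 (set (reg:SI 100) (const_int 4)))

   and with no later set of register 100 before the insn being
   processed, get_last_value ((reg:SI 100)) returns (const_int 4), and
   a query through (subreg:HI (reg:SI 100) 0) returns its HImode
   lowpart.  */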
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
         because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
        return 1;
#endif
      for (; regno < endreg; regno++)
        {
          reg_stat_type *rsp = &reg_stat[regno];
          if (rsp->last_set
              && rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) > from_luid)
            return 1;
        }
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
              return 1;
        }
      else if (fmt[i] == 'e'
               && use_crosses_set_p (XEXP (x, i), from_luid))
        return 1;
    }

  return 0;
}
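
/* E.g., with LUIDs increasing down the basic block:

     (insn 10)  (set (reg:SI 100) ...)
     (insn 11)  ... use of (reg:SI 100) ...
     (insn 12)  (set (reg:SI 100) ...)

   once insn 12 has been recorded, use_crosses_set_p on the use with
   FROM_LUID equal to insn 10's LUID returns nonzero, so combine will
   not move that use of register 100 past its new set in insn 12.  */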
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
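
/* For example, when asked whether (reg:SI 1) is dead at INSN, the
   backward scan answers "dead" on a CLOBBER or a REG_UNUSED/REG_DEAD
   note for register 1, "live" on a SET of it, and otherwise falls back
   to the block's live-in set once the scan reaches BB_HEAD.  */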
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
      /* CC0 must die in the insn after it is set, so we don't need to take
         special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
                  > GET_MODE_SIZE (GET_MODE (x))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
                            < GET_MODE_SIZE (GET_MODE (x)))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some
         piece of every register in the expression is used by
         this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && (((GET_MODE_SIZE (GET_MODE (dest))
                    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
                  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
                       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
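
/* A hypothetical illustration: suppose (reg:DI 4) occupies hard
   registers 4 and 5 and died under a single REG_DEAD note, but X here
   is only (reg:SI 4).  The DImode note is removed, a new REG_DEAD note
   for the other half, (reg:SI 5), is put back on the insn where the
   pair died, and a REG_DEAD note for (reg:SI 4) itself is added to
   *PNOTES for later distribution.  */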
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
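
/* E.g., for BODY

     (set (zero_extract:SI (reg:SI 4) (const_int 8) (const_int 0))
          (reg:SI 100))

   only bits 0-7 of hard register 4 are written, so
   reg_bitfield_target_p returns 1 for any X overlapping register 4.  */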
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              int old_size, args_size = INTVAL (XEXP (note, 0));
              /* fixup_args_size_notes looks at REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
                 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
              gcc_assert (old_size != args_size
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;

        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if it is for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for register which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED,
                                        REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }
          break;

        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They must
             be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0))
                         == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             there.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;

              place2 = 0;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;

        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;
              /* If the new I2 sets the same register that is marked dead
                 in the note, we do not know where to put the note.
                 Give up.  */
              if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                break;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is all
                     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
                     into a REG_UNUSED note instead.  Don't delete sets to
                     global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter
                                       = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn,
                                            NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          unsigned int regno = REGNO (XEXP (note, 0));
                          reg_stat_type *rsp = &reg_stat[regno];
                          if (rsp->last_set == tem_insn)
                            record_value_for_reg (XEXP (note, 0), NULL,
                                                  NULL_RTX);

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;

                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0),
                                             PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE,
                                                   XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not if is it totally replaced,
             which is what `dead_or_set_p' checks, so also check for it being
             set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.] */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs[i][reg_raw_mode[i]])
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;
        }

      if (place2)
        add_shallow_copy_of_reg_note (place2, note);
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn)
                 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
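
/* For example, when insns

     (insn 10)  (set (reg:SI 100) (A))
     (insn 11)  (set (reg:SI 101) (op:SI (reg:SI 100) ...))

   are combined and insn 11 is rewritten, the link that pointed at
   insn 10 is re-homed: starting at NEXT_INSN (insn 10) we look for the
   first remaining use of register 100 and hang the link there, or drop
   it if the register is set again or the basic block ends first.  */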
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
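
/* E.g., unmentioned_reg_p ((plus (reg 100) (const_int 8)),
                            (plus (reg 100) (reg 101)))
   is false, since the only register in EQUIV, (reg 100), also appears
   in EXPR; with the arguments swapped it is true, because (reg 101)
   does not occur in the second expression.  */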
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}