/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
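/* An illustrative (hypothetical) two-insn example of the process
   described above.  Given

	insn A:  (set (reg 100) (plus (reg 99) (const_int 4)))
	insn B:  (set (reg 101) (mult (reg 100) (reg 98)))

   and a LOG_LINK from B back to A, substitution rewrites B as

	insn B': (set (reg 101) (mult (plus (reg 99) (const_int 4))
				      (reg 98)))

   If B' matches an insn pattern in the machine description, B is
   replaced by B' and A is deleted; otherwise the attempt is undone.  */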
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn			*last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn			*last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bits copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
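/* Usage sketch (hypothetical INSN and DEST):

	struct insn_link *l;
	FOR_EACH_LOG_LINK (l, insn)
	  if (l->regno == REGNO (dest))
	    break;

   visits each insn recorded as the most recent in-block setter of a
   register used by INSN.  */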
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
			      rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
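/* Usage sketch of the SUBST machinery (hypothetical PAT and NEWSRC):
   every change made while transforming a pattern is recorded, so a
   failed attempt can be rolled back wholesale:

	SUBST (SET_SRC (pat), newsrc);
	...
	if (attempt_failed)
	  undo_all ();

   undo_all walks undobuf.undos, storing each old_contents back through
   its *where pointer and moving the records onto undobuf.frees for
   reuse.  */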
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;

  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      edges_deleted |= delete_insn_and_edges (insn);
	    }
	}
    }

  return edges_deleted;
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use, *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  new_direct_jump_p |= delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      machine_mode mode = GET_MODE (x);
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      bits = nonzero_bits (src, mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* Should not happen as we are only using pseudo registers.  */
      gcc_assert (GET_CODE (set) != CLOBBER_HIGH);

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}
1819 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1820 optionally insns that were previously combined into I3 or that will be
1821 combined into the merger of INSN and I3. The order is PRED, PRED2,
1822 INSN, SUCC, SUCC2, I3.
1824 Return 0 if the combination is not allowed for any reason.
1826 If the combination is allowed, *PDEST will be set to the single
1827 destination of INSN and *PSRC to the single source, and this function
1831 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1832 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1833 rtx
*pdest
, rtx
*psrc
)
1840 bool all_adjacent
= true;
1841 int (*is_volatile_p
) (const_rtx
);
1847 if (next_active_insn (succ2
) != i3
)
1848 all_adjacent
= false;
1849 if (next_active_insn (succ
) != succ2
)
1850 all_adjacent
= false;
1852 else if (next_active_insn (succ
) != i3
)
1853 all_adjacent
= false;
1854 if (next_active_insn (insn
) != succ
)
1855 all_adjacent
= false;
1857 else if (next_active_insn (insn
) != i3
)
1858 all_adjacent
= false;
1860 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1861 or a PARALLEL consisting of such a SET and CLOBBERs.
1863 If INSN has CLOBBER parallel parts, ignore them for our processing.
1864 By definition, these happen during the execution of the insn. When it
1865 is merged with another insn, all bets are off. If they are, in fact,
1866 needed and aren't also supplied in I3, they may be added by
1867 recog_for_combine. Otherwise, it won't match.
1869 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1872 Get the source and destination of INSN. If more than one, can't
1875 if (GET_CODE (PATTERN (insn
)) == SET
)
1876 set
= PATTERN (insn
);
1877 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1878 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1880 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1882 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1884 switch (GET_CODE (elt
))
1886 /* This is important to combine floating point insns
1887 for the SH4 port. */
1889 /* Combining an isolated USE doesn't make sense.
1890 We depend here on combinable_i3pat to reject them. */
1891 /* The code below this loop only verifies that the inputs of
1892 the SET in INSN do not change. We call reg_set_between_p
1893 to verify that the REG in the USE does not change between
1895 If the USE in INSN was for a pseudo register, the matching
1896 insn pattern will likely match any register; combining this
1897 with any other USE would only be safe if we knew that the
1898 used registers have identical values, or if there was
1899 something to tell them apart, e.g. different modes. For
1900 now, we forgo such complicated tests and simply disallow
1901 combining of USES of pseudo registers with any other USE. */
1902 if (REG_P (XEXP (elt
, 0))
1903 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1905 rtx i3pat
= PATTERN (i3
);
1906 int i
= XVECLEN (i3pat
, 0) - 1;
1907 unsigned int regno
= REGNO (XEXP (elt
, 0));
1911 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1913 if (GET_CODE (i3elt
) == USE
1914 && REG_P (XEXP (i3elt
, 0))
1915 && (REGNO (XEXP (i3elt
, 0)) == regno
1916 ? reg_set_between_p (XEXP (elt
, 0),
1917 PREV_INSN (insn
), i3
)
1918 : regno
>= FIRST_PSEUDO_REGISTER
))
1925 /* We can ignore CLOBBERs. */
1931 /* Ignore SETs whose result isn't used but not those that
1932 have side-effects. */
1933 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1934 && insn_nothrow_p (insn
)
1935 && !side_effects_p (elt
))
1938 /* If we have already found a SET, this is a second one and
1939 so we cannot combine with this insn. */
1947 /* Anything else means we can't combine. */
1953 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1954 so don't do anything with it. */
1955 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate a user-specified register if it is in an
     asm input, because we may break the register asm usage defined
     in the GCC manual if we allow it.
     Be aware that this may cover more cases than we expect, but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;
  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && modified_between_p (src, insn, i3))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && !targetm.hard_regno_mode_ok (REGNO (src),
						  GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;
  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
		  ? volatile_refs_p
		  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (JUMP_P (i3)
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	      || (pred2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	      || (succ != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	      || (succ2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
	return 0;
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
	  && ! all_adjacent)
	return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
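
/* Illustrative example (pseudo RTL, register numbers invented, not from
   the original sources): given the adjacent pair

	INSN:	(set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
	I3:	(set (mem:SI (reg:SI 90)) (reg:SI 100))

   can_combine_p succeeds, setting *PDEST to (reg:SI 100) and *PSRC to the
   PLUS expression, which try_combine will then substitute into I3.  */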
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */
static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = expand_field_assignment (x);
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);
      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
					      GET_MODE (inner_dest)))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
	return 0;
      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
	  && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
	      || (REGNO (subdest) != ARG_POINTER_REGNUM
		  || ! fixed_regs [REGNO (subdest)]))
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   or a division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	       || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (set == 0)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
#ifdef LEAF_REGISTERS
	   && ! LEAF_REGISTERS [REGNO (src)])
#else
	   )
#endif
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};
/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  /* Don't change between modes with different underlying register sizes,
     since this could lead to invalid subregs.  */
  if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
		REGMODE_NATURAL_SIZE (GET_MODE (x))))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (targetm.hard_regno_mode_ok (regno, mode)
	    && REG_NREGS (x) >= hard_regno_nregs (regno, mode));

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !added_sets
	  && !REG_USERVAR_P (x));
}
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
	  break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    switch (GET_CODE (XVECEXP (pat, 0, i)))
      {
      case CLOBBER:
	if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	  return false;
	break;

      case CLOBBER_HIGH:
	break;

      default:
	return false;
      }

  return true;
}
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
	return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
	  return false;
    }

  return true;
}
/* Return whether X is just a single set, with the source
   a general_operand.  */
static bool
is_just_move (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);

  return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
}
/* Callback function to count autoincs.  */

static int
count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
{
  (*((int *) arg))++;

  return 0;
}
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been the I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int split_i2i3 = 0;
  int changed_i3_dest = 0;
  bool i2_was_move = false, i3_was_move = false;
  int n_auto_inc = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;

  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}
      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	     an extension.  */
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
	ngood += 2;

      if (ngood < 2 && nshift < 2)
	return 0;
    }
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;
  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));

      if (i0)
	dump_insn_slim (dump_file, i0);
      if (i1)
	dump_insn_slim (dump_file, i1);
      dump_insn_slim (dump_file, i2);
      dump_insn_slim (dump_file, i3);
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  added_notes_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */
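
  /* For illustration (pseudo RTL, register numbers invented), the shape
     handled here is

	I2:  (parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
			(set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])
	I3:  (set (mem:SI (reg:SI 90)) (reg:SI 100))

     where (reg:SI 100) is a pseudo that dies in I3: the destination of I3
     replaces the quotient destination inside I2's PARALLEL, so the divmod
     result is computed straight into the structure slot.  */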
  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.

	 Neither can this PARALLEL be an asm.  We do not allow combining
	 that usually (see can_combine_p), so do not here either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
	{
	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER
	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER_HIGH)
	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					  SET_DEST (XVECEXP (p2, 0, i))))
	    ok = false;
	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
	    ok = false;
	}

      if (ok)
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      combine_merges++;
	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
	      i2src = SET_SRC (XVECEXP (p2, 0, i));
	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we validate
		 the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      rtx temp_dest = SET_DEST (temp_expr);
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (CONST_INT_P (XEXP (dest, 1))
	      && CONST_INT_P (XEXP (dest, 2))
	      && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
					 &dest_mode))
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_PRECISION (dest_mode) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
	    {
	      width = GET_MODE_PRECISION (dest_mode);
	      offset = 0;
	    }
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_PRECISION (temp_mode)
		   == 2 * GET_MODE_PRECISION (dest_mode))
	    offset += GET_MODE_PRECISION (dest_mode);
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0)
	{
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp_expr);

	  wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
				   rtx_mode_t (inner, dest_mode),
				   offset, width);

	  combine_merges++;
	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
	  i2dest = temp_dest;
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  /* Replace the source in I2 with the new constant and make the
	     resulting insn the new pattern for I3.  Then skip to where we
	     validate the pattern.  Everything was set up above.  */
	  SUBST (SET_SRC (temp_expr),
		 immed_wide_int_const (o, temp_mode));

	  newpat = PATTERN (i2);

	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
	  goto validate_replacement;
	}
    }
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */
  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
	     SET_DEST (PATTERN (i1)));
      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and maybe I1 and I0 can be combined into I3.  */
  if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Can't combine i2 into i3\n");
      undo_all ();
      return 0;
    }
  if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Can't combine i1 into i3\n");
      undo_all ();
      return 0;
    }
  if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Can't combine i0 into i3\n");
      undo_all ();
      return 0;
    }
  /* Record whether i2 and i3 are trivial moves.  */
  i2_was_move = is_just_move (i2);
  i3_was_move = is_just_move (i3);

  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
  i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
  i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
  i0dest_killed = i0 && dead_or_set_p (i0, i0dest);

  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
			     && reg_overlap_mentioned_p (i0dest, i2src))));
  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
			  &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
	mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
    if (AUTO_INC_DEC)
      for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == REG_INC
	    && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
		|| (i1 != 0
		    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
	  {
	    undo_all ();
	    return 0;
	  }
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
		     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
		     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			 && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }
  /* Count how many auto_inc expressions there were in the original insns;
     we need to have the same number in the resulting patterns.  */

  if (i0)
    for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
  if (i1)
    for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
  for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
  for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
	i0pat = copy_rtx (PATTERN (i0));
    }
  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;

  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */
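
  /* For illustration (pseudo RTL, register numbers invented): on such
     machines a flag-setting add has the shape

	(parallel [(set (reg:CC flags)
			(compare:CC (plus:SI (reg:SI 100) (reg:SI 101))
				    (const_int 0)))
		   (set (reg:SI 102) (plus:SI (reg:SI 100) (reg:SI 101)))])

     so keeping the arithmetic visible alongside the COMPARE lets such a
     pattern match directly instead of being simplified apart.  */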
  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
      scalar_int_mode mode;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
					    &cc_use_insn)))
	{
	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
	  if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
	    compare_code = simplify_compare_const (compare_code, mode,
						   op0, &op1);
	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
	}

      /* Do the rest only if op1 is const0_rtx, which may be the
	 result of simplification.  */
      if (op1 == const0_rtx)
	{
	  /* If a single use of the CC is found, prepare to modify it
	     when SELECT_CC_MODE returns a new CC-class mode, or when
	     the above simplify_compare_const() returned a new comparison
	     operator.  undobuf.other_insn is assigned the CC use insn
	     when modifying it.  */
	  if (cc_use_loc)
	    {
#ifdef SELECT_CC_MODE
	      machine_mode new_mode
		= SELECT_CC_MODE (compare_code, op0, op1);
	      if (new_mode != orig_compare_mode
		  && can_change_dest_mode (SET_DEST (newpat),
					   added_sets_2, new_mode))
		{
		  unsigned int regno = REGNO (newpat_dest);
		  compare_mode = new_mode;
		  if (regno < FIRST_PSEUDO_REGISTER)
		    newpat_dest = gen_rtx_REG (compare_mode, regno);
		  else
		    {
		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		      newpat_dest = regno_reg_rtx[regno];
		    }
		}
#endif
	      /* Cases for modifying the CC-using comparison.  */
	      if (compare_code != orig_compare_code
		  /* ??? Do we need to verify the zero rtx?  */
		  && XEXP (*cc_use_loc, 1) == const0_rtx)
		{
		  /* Replace cc_use_loc with entire new RTX.  */
		  SUBST (*cc_use_loc,
			 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
					 newpat_dest, const0_rtx));
		  undobuf.other_insn = cc_use_insn;
		}
	      else if (compare_mode != orig_compare_mode)
		{
		  /* Just replace the CC reg with a new mode.  */
		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
		  undobuf.other_insn = cc_use_insn;
		}
	    }

	  /* Now we modify the current newpat:
	     First, SET_DEST(newpat) is updated if the CC mode has been
	     altered.  For targets without SELECT_CC_MODE, this should be
	     optimized away.  */
	  if (compare_mode != orig_compare_mode)
	    SUBST (SET_DEST (newpat), newpat_dest);
	  /* This is always done to propagate i2src into newpat.  */
	  SUBST (SET_SRC (newpat),
		 gen_rtx_COMPARE (compare_mode, op0, op1));
	  /* Create new version of i2pat if needed; the below PARALLEL
	     creation needs this to work correctly.  */
	  if (! rtx_equal_p (i2src, op0))
	    i2pat = gen_rtx_SET (i2dest, op0);
	  i2_is_used = 1;
	}
    }
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
	    }

	  subst_low_luid = DF_INSN_LUID (i2);
	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
	}

      n_occurrences = 0;		/* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
	 copy of I2SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I1SRC for I1DEST
	 later.  Likewise if I0 feeds into I2, either directly or indirectly
	 through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
		      (i1_feeds_i2_n && i1dest_in_i1src)
		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			  && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Before we can do this substitution, we must redo the test done
	 above (see detailed comments there) that ensures I1DEST isn't
	 mentioned in any SETs in NEWPAT that are field assignments.  */
      if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
			     0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
	 for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
	i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
	 copy of I1SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I0SRC for I0DEST
	 later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
		      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
			     0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      /* If the following substitution will modify I0SRC, make a copy of it
	 for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
	i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
			   || (i0_feeds_i2_n)))
	i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }

  if (n_auto_inc)
    {
      int new_n_auto_inc = 0;
      for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);

      if (n_auto_inc != new_n_auto_inc)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Number of auto_inc expressions changed\n");
	  undo_all ();
	  return 0;
	}
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
	      > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	  && (n_occurrences + added_sets_0
	      + (added_sets_1 && i0_feeds_i1_n)
	      + (added_sets_2 && i0_feeds_i2_n)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      combine_extras++;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_0)
	XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
	{
	  rtx t = i1pat;
	  if (i0_feeds_i1_n)
	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}

      if (added_sets_2)
	{
	  rtx t = i2pat;
	  if (i1_feeds_i2_n)
	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
		       i0_feeds_i1_n && i0dest_in_i0src);
	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }
  /* We have recognized nothing yet.  */
  insn_code_number = -1;

  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.

     If the remaining SET came from I2 its destination should not be used
     between I2 and I3.  See PR82024.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
	  && insn_nothrow_p (i3)
	  && !side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && insn_nothrow_p (i3)
	       && !side_effects_p (SET_SRC (set0)))
	{
	  rtx dest = SET_DEST (set1);
	  if (GET_CODE (dest) == SUBREG)
	    dest = SUBREG_REG (dest);
	  if (!reg_used_between_p (dest, i2, i3))
	    {
	      newpat = set1;
	      insn_code_number = recog_for_combine (&newpat, i3,
						    &new_i3_notes);

	      if (insn_code_number >= 0)
		changed_i3_dest = 1;
	    }
	}

      if (insn_code_number < 0)
	newpat = oldpat;
    }
  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3666 /* If we were combining three insns and the result is a simple SET
3667 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3668 insns. There are two ways to do this. It can be split using a
3669 machine-specific method (like when you have an addition of a large
3670 constant) or by combine in the function find_split_point. */
3672 if (i1
&& insn_code_number
< 0 && GET_CODE (newpat
) == SET
3673 && asm_noperands (newpat
) < 0)
3675 rtx parallel
, *split
;
3676 rtx_insn
*m_split_insn
;
3678 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3679 use I2DEST as a scratch register will help. In the latter case,
3680 convert I2DEST to the mode of the source of NEWPAT if we can. */
3682 m_split_insn
= combine_split_insns (newpat
, i3
);
      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
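      /* Illustrative sketch (not part of the original source; registers
	 and constant invented): wrapping NEWPAT as

	     (parallel [(set (reg:SI 100)
			     (plus:SI (reg:SI 101) (const_int 4096)))
			(clobber (reg:SI 102))])

	 offers an MD splitter one scratch register -- the clobbered
	 (reg:SI 102), standing in for I2DEST -- so it can break the insn
	 in two where it otherwise could not.  */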
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn
	       && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || !modified_between_p (*split, i2, i3))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_BIND_INSNS)
	    i2src = copy_rtx (i2src);

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }
	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      rtx i_rtx = gen_int_shift_amount (split_mode, i);
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
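	  /* Illustrative example (not part of the original source): a
	     power-of-two multiply such as

		 (mult:SI (reg:SI 100) (const_int 8))

	     is rewritten above as

		 (ashift:SI (reg:SI 100) (const_int 3))

	     since 8 == 1 << 3; most targets recognize the shift where
	     they would not recognize a multiply outside an address.  */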
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
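	  /* Illustrative example (not part of the original source): a
	     paradoxical

		 (subreg:SI (mem:QI ...) 0)

	     is rewritten as (zero_extend:SI (mem:QI ...)), or as a
	     sign_extend when load_extend_op says the target's narrow
	     loads sign-extend.  */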
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }

	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }
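	  /* Illustrative example (not part of the original source;
	     register numbers invented): with NEWDEST being (reg:SI 102),
	     a source like

		 (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 101)))

	     is split by the "X = Y op Y" rule above into

		 i2: (set (reg:SI 102) (reg:SI 101))
		 i3: (set (reg:SI 100)
			  (plus:SI (reg:SI 102) (reg:SI 102)))  */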
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
    }
  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a register-register
     copy.  This saves at least one insn, more if register allocation can
     eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
			       BITS_PER_WORD)
		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
			       HOST_BITS_PER_INT)
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat,
								0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   BITS_PER_WORD)
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   HOST_BITS_PER_INT)
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }
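  /* Illustrative example (not part of the original source; register
     numbers invented): the case above turns

	 (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI ...)))
		    (set (reg:HI 101) (mem:HI ...))])

     into the narrow load followed by a register-register copy:

	 i2: (set (reg:SI 100) (sign_extend:SI (mem:HI ...)))
	 i3: (set (reg:HI 101) (subreg:HI (reg:SI 100) 0))

     (modulo gen_lowpart details and the endian-dependent subreg
     offset).  */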
  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.

     Also do this if we started with two insns neither of which was a simple
     move.  */

  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1
	       || set_noop_p (XVECEXP (newpat, 0, 0))
	       || set_noop_p (XVECEXP (newpat, 0, 1))
	       || (!i2_was_move && !i3_was_move))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there, or would
	 have its dest modified between i2 and i3.  */
      if (!modified_between_p (SET_SRC (set1), i2, i3)
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && !modified_between_p (SET_DEST (set1), i2, i3)
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!modified_between_p (SET_SRC (set0), i2, i3)
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && !modified_between_p (SET_DEST (set0), i2, i3)
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}
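      /* Illustrative example (not part of the original source; register
	 numbers invented): given

	     (parallel [(set (reg:SI 100) (reg:SI 101))
			(set (reg:SI 102) (reg:SI 103))])

	 the checks above pick which SET may safely be hoisted into I2's
	 slot: a SET goes to I2 only if its source and destination are
	 unchanged between I2 and I3 and I2 carries no conflicting
	 REG_DEAD note for its destination.  */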
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    {
	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		  {
		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		    if (reg_overlap_mentioned_p (reg, newpat))
		      {
			undo_all ();
			return 0;
		      }
		  }
	    }

	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    split_i2i3 = 1;
	}
    }

  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }
  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }
  /* Only allow this combination if insn_cost reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }
  if (MAY_HAVE_DEBUG_BIND_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }
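  /* Illustrative sketch (not part of the original source): if
     (reg:SI 100) was widened to (reg:DI 100), debug uses of the old
     SImode value are first redirected to a fresh temporary
     (reg:SI <tmp>); once the shared REG carries its new mode again,
     the temporary is replaced by (subreg:SI (reg:DI 100) 0).  */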
  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }
  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop equal note since it may be no longer valid
		 for other_insn.  It may be possible to record that CC
		 register is changed and only discard those notes, but
		 in practice it's unnecessary complication and doesn't
		 give any meaningful improvement.  */
	      || REG_NOTE_KIND (note) == REG_EQUAL
	      || REG_NOTE_KIND (note) == REG_EQUIV)
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }
  if (swap_i2i3 || split_i2i3)
    {
      /* We might need a LOG_LINK from I3 to I2.  But then we used to
	 have one, so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We should change it to point at
	 I2 instead.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      rtx x = newi2pat;
      if (GET_CODE (x) == PARALLEL)
	x = XVECEXP (newi2pat, 0, 0);

      /* It can only be a SET of a REG or of a SUBREG of a REG.  */
      unsigned int regno = reg_or_subregno (SET_DEST (x));

      bool done = false;
      for (rtx_insn *insn = NEXT_INSN (i3);
	   !done
	   && insn
	   && NONDEBUG_INSN_P (insn)
	   && BLOCK_FOR_INSN (insn) == this_basic_block;
	   insn = NEXT_INSN (insn))
	{
	  struct insn_link *link;
	  FOR_EACH_LOG_LINK (link, insn)
	    if (link->insn == i3 && link->regno == regno)
	      {
		link->insn = i2;
		done = true;
		break;
	      }
	}
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be setting
       either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       notes for i2 and i3, in which case it does matter if newi2pat sets
       i1dest.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:

	   i1: r1 <- i1src (using r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)

       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);
    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything that is
       shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;
    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
	     link = XEXP (link, 1))
	  {
	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						       i2dest, i2src);
	      }
	    if (substed_i1)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i1dest, i1src);
	    if (substed_i0)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i0dest, i0src);
	  }
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;
    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;
    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }
    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, things
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }
    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }
    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }
  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);
      gcc_assert (bb);
      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
      gcc_assert (bb);
      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }
  combine_successes++;
  undo_commit ();

  rtx_insn *ret = newi2pat ? i2 : i3;
  if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
    ret = added_links_insn;
  if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
    ret = added_notes_insn;

  return ret;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}
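/* Usage sketch (illustrative only, not part of the original source):

       void *marker = get_undo_marker ();
       SUBST (XEXP (x, 0), new_rtx);	 ... tentative change ...
       if (change_turned_out_bad)
	 undo_to_marker (marker);	 ... revert just that change ...

   undo_all below is simply undo_to_marker (0), i.e. it reverts
   everything recorded in undobuf.  */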
/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
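      /* Illustrative example (not part of the original source):

	     (mem (symbol_ref "var"))

	 becomes

	     (mem (lo_sum (high (symbol_ref "var"))
			  (symbol_ref "var")))

	 so the HIGH part can be computed by a separate insn, as on RISC
	 targets where a full symbolic address does not fit in one
	 instruction.  */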
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }
	  /* If that didn't work and we have a nested plus, like:
	     ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
	     is valid address, try to split (REG1 * CONST1).  */
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
	      && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
	      && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	      && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
							 0), 0)))))
	    {
	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
	      XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					       MEM_ADDR_SPACE (x)))
		{
		  XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
		  return &XEXP (XEXP (XEXP (x, 0), 0), 0);
		}
	      XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
		   && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
		   && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
							      0), 1)))))
	    {
	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
	      XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					       MEM_ADDR_SPACE (x)))
		{
		  XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
		  return &XEXP (XEXP (XEXP (x, 0), 0), 1);
		}
	      XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */
	  if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
	      && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}
      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);

      break;
    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;
      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
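      /* Illustrative example (not part of the original source; assumes
	 little-endian bit numbering): storing 3 into a 2-bit field at
	 bit 4 of (reg:SI 100),

	     (set (zero_extract:SI (reg:SI 100)
				   (const_int 2) (const_int 4))
		  (const_int 3))

	 becomes

	     (set (reg:SI 100) (ior:SI (reg:SI 100) (const_int 48)))

	 because 3 fills the whole field (src == mask); otherwise an AND
	 with ~(mask << pos) is applied first.  */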
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
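	  /* Illustrative example (not part of the original source): for

	     (set (reg:SI 100) (and:SI (reg:SI 101) (const_int 4096)))

	     whose only use is (ne (reg:SI 100) (const_int 0)), the AND
	     with the expensive constant is replaced by a one-bit
	     extraction

	     (zero_extract:SI (reg:SI 101) (const_int 1) (const_int 12))

	     since 4096 == 1 << 12.  */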
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
						   GET_MODE (XEXP (SET_SRC (x),
								   0))))) >= 1))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    pos_rtx)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
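	  /* Illustrative example (not part of the original source): with
	     STORE_FLAG_VALUE == -1 and only bit 5 of (reg:SI 100)
	     possibly nonzero,

		 (ne:SI (reg:SI 100) (const_int 0))

	     becomes

		 (neg:SI (lshiftrt:SI (reg:SI 100) (const_int 5)))

	     which yields 0 or -1 directly.  */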
	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (inner_mode);
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
				      &inner_mode)
	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0
	  && known_subrange_p (pos, len,
			       0, GET_MODE_PRECISION (GET_MODE (inner)))
	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
	{
	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner), pos_rtx),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      int left_bits = GET_MODE_PRECISION (mode) - len - pos;
	      int right_bits = GET_MODE_PRECISION (mode) - len;
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      gen_int_shift_amount (mode, left_bits)),
		      gen_int_shift_amount (mode, right_bits)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
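      /* Illustrative example (not part of the original source):
	 extracting a signed 8-bit field at bit 8 of (reg:SI 100) in
	 SImode takes the two-shift path above and becomes

	     (ashiftrt:SI (ashift:SI (reg:SI 100) (const_int 16))
			  (const_int 24))

	 i.e. shift left by precision - len - pos, then arithmetic
	 shift right by precision - len.  */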
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;
    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;
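      /* Illustrative example (not part of the original source):

	     (and:SI (not:SI (reg:SI 100)) (not:SI (reg:SI 101)))

	 is rewritten above as

	     (not:SI (ior:SI (reg:SI 100) (reg:SI 101)))

	 so that, on a machine without a NOR insn, the inner IOR can be
	 computed by one insn and the NOT by another.  */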
    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

      break;

    default:
      break;
    }
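  /* Illustrative example (not part of the original source):

	 (minus:SI (reg:SI 100) (mult:SI (reg:SI 101) (const_int 10)))

     is retried above as

	 (plus:SI (mult:SI (reg:SI 101) (const_int -10)) (reg:SI 100))

     since 10 is not a power of two; a multiply-add pattern may match
     where the multiply-subtract form does not.  */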
  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;
  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && !targetm.modes_tieable_p (GET_MODE (to),
						   GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && (targetm.modes_tieable_p
				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
		      && (!HAVE_cc0
			  || ! (code == SET && i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);
	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && (GET_CODE (x) == ZERO_EXTEND
			   || GET_CODE (x) == FLOAT
			   || GET_CODE (x) == UNSIGNED_FLOAT))
		{
		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  if (! x)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }
  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
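
/* Worked example of the substitution subst performs (the register
   numbers here are arbitrary, for illustration only): given

       I1: (set (reg 100) (plus (reg 99) (const_int 4)))
       I2: (set (reg 101) (mult (reg 100) (reg 102)))

   a combine attempt on the pair calls subst on the body of I2 with
   FROM = (reg 100) and TO = (plus (reg 99) (const_int 4)), producing
   the candidate

       (set (reg 101) (mult (plus (reg 99) (const_int 4)) (reg 102)))

   which is then matched against the machine description.  */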
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  && cond != x
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
	  /* Similarly, if we end up with one of the expressions the same
	     as the original, it is certainly not simpler.  */
	  && ! rtx_equal_p (x, true_rtx)
	  && ! rtx_equal_p (x, false_rtx))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 inner_op1);
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
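
  /* Worked example of the reassociation above (for illustration): for
     (plus (plus X (const_int 3)) (const_int 5)) the two inner operands
     are the constants; simplify_binary_operation folds them to
     (const_int 8), and we return (plus X (const_int 8)).  */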
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	rtx temp;
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
			 SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
		 & GET_MODE_MASK (int_mode)) == 0)
	    && !side_effects_p (SUBREG_REG (x)))
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;
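
      /* Illustration of the nonzero-bits test in the SUBREG case above:
	 in (subreg:QI (and:SI X (const_int 0xff00)) 0) on a little-endian
	 target, the low eight bits of the inner value are known to be
	 zero, so the whole expression folds to (const_int 0).  */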
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;
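
      /* Illustration of the shift pair above, in SImode (precision 32):
	 if TEMP can only be 0 or 4, then i == 2 and (neg TEMP) becomes
	 (ashiftrt (ashift TEMP (const_int 29)) (const_int 29)); shifting
	 bit 2 into the sign bit and arithmetically shifting it back down
	 yields 0 or -4, the same values as the negation.  */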
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      {
	poly_int64 c;
	if (poly_int_rtx_p (XEXP (x, 0), &c))
	  return gen_int_mode (c, mode);
      }

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && known_eq ((GET_MODE_PRECISION
				(GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
			       (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);

      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
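
      /* Illustration of the sign-extension pattern above, for the low
	 byte in SImode: (plus (xor (and X (const_int 255)) (const_int 128))
	 (const_int -128)) computes ((X & 0xff) ^ 0x80) - 0x80, which is
	 the sign extension of the low byte of X, so it is rewritten as a
	 left shift by 24 followed by an arithmetic right shift by 24.  */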
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
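
      /* Illustration: (minus X (and X (const_int -8))) is X - (X & ~7),
	 i.e. the low three bits of X, so it becomes
	 (and X (const_int 7)).  */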
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
      break;
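
      /* Illustration: (udiv (lshiftrt X (const_int 3)) (const_int 4))
	 becomes a logical right shift of the inner shift by 2, which
	 simplify_shift_const then merges into
	 (lshiftrt X (const_int 5)).  */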
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     NOT.

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */

	  if (in_cond)
	    ;
	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		x = XEXP (x, 0);

	      return x;
	    }
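
	  /* Illustration of the conversions above, with
	     STORE_FLAG_VALUE == 1: if OP0 is known to be 0 or 1,
	     (ne OP0 (const_int 0)) is simply OP0 and
	     (eq OP0 (const_int 0)) is (xor OP0 (const_int 1)); if OP0 is
	     known to be 0 or -1, (ne OP0 (const_int 0)) becomes
	     (neg OP0), which maps -1 to 1 and 0 to 0.  */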
	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_UNIT_BITSIZE (mode)))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
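
  /* Illustration of the arm simplification above (register names are
     arbitrary): in

	 (if_then_else (eq (reg A) (const_int 0))
		       (plus (reg A) (reg B))
		       (reg C))

     the true arm is only used when (reg A) is zero, so known_cond
     reduces that arm to (reg B).  */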
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }
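
  /* Illustration: (if_then_else (gt X (const_int 0)) X (neg X)) matches
     the GT arm above and becomes (abs X); with LT it would become
     (neg (abs X)).  */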
  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
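
  /* Illustration: (if_then_else (ge A B) A B) becomes (smax A B), and
     (if_then_else (ltu A B) A B) becomes (umin A B).  */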
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}

      if (z)
	{
	  machine_mode cm = m;
	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
	      && GET_MODE (c1) != VOIDmode)
	    cm = GET_MODE (c1);
	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, cm, temp,
				      simplify_gen_binary (MULT, cm, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);
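
  /* Illustration: if A is known to be 0 or 1,
     (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
     becomes (ashift A (const_int 3)).  */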
  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }
  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);
#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif  /* SELECT_CC_MODE */
      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (known_equal_after_align_up
	  (GET_MODE_SIZE (GET_MODE (src)),
	   GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
	   UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
				       GET_MODE (SUBREG_REG (src)),
				       GET_MODE (src)))
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }
  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && partial_subreg_p (src)
      && subreg_lowpart_p (src))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.
     Don't do this unless we have a scalar integer mode, otherwise the
     transformation is incorrect.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && SCALAR_INT_MODE_P (GET_MODE (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1),
	  false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0),
	  false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1),
	  true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0),
	  true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }
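
  /* Illustration of the three IOR'ed terms above: if A is known to be
     0 or -1, (if_then_else (ne A (const_int 0)) X Y) is rewritten as
     (ior (and A X) (and (not A) Y)); the AND with A selects X when A
     is -1, and the AND with (not A) selects Y when A is 0.  */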
  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */

static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* FALLTHRU */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* ... fall through ...  */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }
  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }
  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }
7429 /* If we reach here, we want to return a pair of shifts. The inner
7430 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7431 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7432 logical depending on the value of UNSIGNEDP.
7434 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7435 converted into an AND of a shift.
7437 We must check for the case where the left shift would have a negative
7438 count. This can happen in a case like (x >> 31) & 255 on machines
7439 that can't shift by a constant. On those machines, we would first
7440 combine the shift with the AND to produce a variable-position
7441 extraction. Then the constant of 31 would be substituted in
7442 to produce such a position. */
7444 modewidth
= GET_MODE_PRECISION (mode
);
7445 if (modewidth
>= pos
+ len
)
7447 tem
= gen_lowpart (mode
, XEXP (x
, 0));
7448 if (!tem
|| GET_CODE (tem
) == CLOBBER
)
7450 tem
= simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
7451 tem
, modewidth
- pos
- len
);
7452 tem
= simplify_shift_const (NULL_RTX
, unsignedp
? LSHIFTRT
: ASHIFTRT
,
7453 mode
, tem
, modewidth
- len
);
7455 else if (unsignedp
&& len
< HOST_BITS_PER_WIDE_INT
)
7456 tem
= simplify_and_const_int (NULL_RTX
, mode
,
7457 simplify_shift_const (NULL_RTX
, LSHIFTRT
,
7460 (HOST_WIDE_INT_1U
<< len
) - 1);
7462 /* Any other cases we can't handle. */
7465 /* If we couldn't do this for some reason, return the original
7467 if (GET_CODE (tem
) == CLOBBER
)
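
/* As a worked illustration of the shift pair above (added commentary with
   hypothetical operands, assuming BITS_BIG_ENDIAN == 0): on a 32-bit target,
     (sign_extract:SI (reg:SI x) (const_int 8) (const_int 4))
   expands into
     (ashiftrt:SI (ashift:SI (reg:SI x) (const_int 20)) (const_int 24)),
   i.e. a left shift of MODEWIDTH - POS - LEN = 20 bits followed by an
   arithmetic right shift of MODEWIDTH - LEN = 24 bits.  */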
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len, inner_len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  rtx x0 = XEXP (SET_DEST (x), 0);
	  if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
	    break;
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
			      MAX_MODE_INT);
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
	    break;
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (inner_len - len - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && INTVAL (XEXP (pos, 1)) == inner_len - len)
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					   gen_int_mode (inner_len - len,
							 GET_MODE (pos)),
					   pos);
	    }
	}

      /* If the destination is a subreg that overwrites the whole of the inner
	 register, we can move the subreg to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && !read_modify_subreg_p (SET_DEST (x)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non-scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);

      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
		 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG
      && subreg_lowpart_p (inner)
      && (paradoxical_subreg_p (inner)
	  /* If trying or potentially trying to extract
	     bits outside of is_mode, don't look through
	     non-paradoxical SUBREGs.  See PR82192.  */
	  || (pos_rtx == NULL_RTX
	      && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE
	   /* If trying or potentially trying to extract
	      bits outside of is_mode, don't look through
	      TRUNCATE.  See PR82192.  */
	   && pos_rtx == NULL_RTX
	   && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && known_le (pos + len, GET_MODE_PRECISION (is_mode))
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  poly_int64 offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
					       - len - pos);
	  else
	    offset = pos / BITS_PER_UNIT;

	  new_rtx = adjust_address_nv (inner, tmode, offset);
	}
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  poly_uint64 offset
		    = subreg_offset_from_lsb (tmode, inner_mode, pos);

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, offset))
		    return NULL_RTX;

		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
		}
	      else
		new_rtx = gen_lowpart (tmode, inner);
	    }
	  else
	    new_rtx = inner;
	}
      else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new_rtx) ? new_rtx
		: (GET_CODE (new_rtx) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
	return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (tmode)
	      && ((nonzero_bits (new_rtx, tmode)
		   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new_rtx));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
	  || (pos_rtx != 0 && len != 1)))
    return 0;

  enum extraction_pattern pattern = (in_dest ? EP_insv
				     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  unsigned int inner_size;
  if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
      && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
    {
      wanted_inner_reg_mode = insn.struct_mode.require ();
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && partial_subreg_p (extraction_mode, mode))
    extraction_mode = mode;

  /* Punt if len is too large for extraction_mode.  */
  if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
    return NULL_RTX;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
      HOST_WIDE_INT width;
      if (!MEM_P (inner))
	width = GET_MODE_BITSIZE (wanted_inner_mode);
      else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
	return NULL_RTX;

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
			   pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && partial_subreg_p (wanted_inner_mode, is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      poly_int64 offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && paradoxical_subreg_p (is_mode, inner_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
	 the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
      if (in_dest
	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
					     wanted_inner_mode))
	return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return NULL_RTX;

      if (orig_pos < 0)
	return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.

     We dealt with constant rtxes earlier, so pos_rtx cannot
     have VOIDmode at this point.  */
  if (pos_rtx != 0
      && (GET_MODE_SIZE (pos_mode)
	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
				     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 SIGN_EXTENSION and ZERO_EXTENSION, which are equivalent in these
	 cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
					  GET_MODE (pos_rtx));

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
	    temp = temp1;
	}
      pos_rtx = temp;
    }

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			     extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
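
/* As a worked illustration (added commentary with hypothetical operands):
     make_extraction (SImode, inner, 3, NULL_RTX, 8, 1, 0, 0)
   asks for the eight bits of INNER starting at bit 3 as an unsigned value;
   unless one of the cheaper forms above applies (a MEM adjustment, a
   SUBREG, or a plain extension), the result is typically
     (zero_extract:SI inner (const_int 8) (const_int 3)).  */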
/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}
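
/* As a worked illustration (added commentary with hypothetical operands):
   with COUNT == 2,
     (plus:SI (ashift:SI (reg:SI x) (const_int 5)) (const_int 12))
   yields
     (plus:SI (ashift:SI (reg:SI x) (const_int 3)) (const_int 3)),
   since both the shift count and the constant can absorb a shift by
   two bits.  */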
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW_RTX is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	}
      maybe_swap_commutative_operands (x);
      return x;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}

    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation (XEXP (XEXP (x, 0),
								    0),
							      next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  int sub_width;
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
	      && sub_width < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* Original AND constant with all the known zero bits set.  */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	    && CONST_INT_P (XEXP (inner, 1))
	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (inner_mode)
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
		   for non-equality comparisons against 0 is not equivalent
		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && partial_subreg_p (x)
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode) - 1)))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && partial_subreg_p (x)
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded
	       the compound, use gen_lowpart to convert to the desired
	       mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
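
/* As a worked illustration (added commentary with hypothetical operands):
   the AND case above rewrites
     (and:SI (lshiftrt:SI (reg:SI x) (const_int 3)) (const_int 63))
   as
     (zero_extract:SI (reg:SI x) (const_int 6) (const_int 3)),
   because 63 is one less than a power of two, so the AND selects a
   six-bit field starting at bit 3.  */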
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */

rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}
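
/* As a worked illustration (added commentary with hypothetical operands):
   with IN_CODE == SET, make_compound_operation rewrites
     (and:SI (lshiftrt:SI (reg:SI x) (const_int 24)) (const_int 255))
   as (zero_extract:SI (reg:SI x) (const_int 8) (const_int 24));
   expand_compound_operation performs the inverse expansion.  */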
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
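
/* As a worked illustration (added commentary with hypothetical values):
   for M == 0xf0 (bits 4..7 set) get_pos_from_mask returns 4 and sets
   *PLEN to 4, while for M == 0xf1 it returns -1 because the 1 bits do
   not form one contiguous field.  */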
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
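
/* As an illustration (added commentary, hypothetical case): narrowing a
   DImode register already known to be truncated to QImode yields
   (subreg:QI (reg:DI x) 0) directly, while on a target where the
   truncation is not a no-op an explicit (truncate:QI (reg:DI x)) is
   generated instead.  */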
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode != VOIDmode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
	  || (mask
	      & GET_MODE_MASK (GET_MODE (x))
	      & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer too.  */
    return force_int_to_mode (x, int_mode, xmode,
			      as_a <scalar_int_mode> (op_mode),
			      mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}
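
/* As a worked illustration (added commentary with hypothetical operands):
   force_to_mode (x, QImode, 0x0f, 0) applied to
   x = (and:SI (reg:SI r) (const_int 255)) can drop the AND entirely and
   return (subreg:QI (reg:SI r) 0): only the low four bits are wanted,
   and the AND mask already covers them.  */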
/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
		   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;
  poly_int64 const_op0;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));
	  xmode = op_mode;

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (xmode) != mask
	      && HWI_COMPUTABLE_MODE_P (xmode))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
	  && (unsigned HOST_WIDE_INT) known_alignment (const_op0) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (xmode))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       xmode);
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* fall through */

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	{
	  x = simplify_gen_binary (code, op_mode, op0, op1);
	  xmode = op_mode;
	}
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode,
						    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
	  xmode = op_mode;
	}
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
	    op_mode = xmode;

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (xmode != op_mode || inner != XEXP (x, 0))
	    {
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	      xmode = op_mode;
	    }
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (xmode))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (xmode))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	{
	  int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
	  x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				   gen_int_shift_amount (xmode, nbits));
	}
      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT nonzero;
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (xmode);
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (xmode);
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
		   GET_MODE_PRECISION (xmode) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
	  xmode = op_mode;
	}
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }
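  /* Example (added for exposition): for X == (ne:SI r (const_int 0)) the
     code above returns r with *PTRUE == const_true_rtx and
     *PFALSE == const0_rtx, i.e. X evaluates to the true value exactly
     when r is nonzero.  */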
  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
           && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
                                    GET_MODE (XEXP (x, 0)));
      return cond0;
    }
  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
          && (REG_P (op0) || REG_P (op1)))
        {
          /* Try to enable a simplification by undoing work done by
             if_then_else_cond if it converted a REG into something more
             complex.  */
          if (REG_P (op0))
            {
              cond0 = 0;
              true0 = false0 = op0;
            }
          else
            {
              cond1 = 0;
              true1 = false1 = op1;
            }
        }

      if ((cond0 != 0 || cond1 != 0)
          && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
        {
          /* If if_then_else_cond returned zero, then true/false are the
             same rtl.  We must copy one of them to prevent invalid rtl
             sharing.  */
          if (cond0 == 0)
            true0 = copy_rtx (true0);
          else if (cond1 == 0)
            true1 = copy_rtx (true1);

          if (COMPARISON_P (x))
            {
              *ptrue = simplify_gen_relational (code, mode, VOIDmode,
                                                true0, true1);
              *pfalse = simplify_gen_relational (code, mode, VOIDmode,
                                                 false0, false1);
            }
          else
            {
              *ptrue = simplify_gen_binary (code, mode, true0, true1);
              *pfalse = simplify_gen_binary (code, mode, false0, false1);
            }

          return cond0 ? cond0 : cond1;
        }
      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
         operands is zero when the other is nonzero, and vice-versa,
         and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
          && (code == PLUS || code == IOR || code == XOR || code == MINUS
              || code == UMAX)
          && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
        {
          rtx op0 = XEXP (XEXP (x, 0), 1);
          rtx op1 = XEXP (XEXP (x, 1), 1);

          cond0 = XEXP (XEXP (x, 0), 0);
          cond1 = XEXP (XEXP (x, 1), 0);

          if (COMPARISON_P (cond0)
              && COMPARISON_P (cond1)
              && SCALAR_INT_MODE_P (mode)
              && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
                   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
                   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
                  || ((swap_condition (GET_CODE (cond0))
                       == reversed_comparison_code (cond1, NULL))
                      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
                      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
              && ! side_effects_p (x))
            {
              *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
              *pfalse = simplify_gen_binary (MULT, mode,
                                             (code == MINUS
                                              ? simplify_gen_unary (NEG, mode,
                                                                    op1, mode)
                                              : op1),
                                             const_true_rtx);
              return cond0;
            }
        }
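      /* Example (added for exposition): branch-free RTL for a C-level
         "a == b ? c : d" can appear as

             (plus (mult (eq a b) c) (mult (ne a b) d))

         with STORE_FLAG_VALUE == 1: the two conditions are reverses of
         each other and exactly one MULT operand is live, so the code
         above recovers (eq a b) with *PTRUE == c and *PFALSE == d (after
         the const_true_rtx scaling simplifies away).  */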
      /* Similarly for MULT, AND and UMIN, except that for these the result
         is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
          && (code == MULT || code == AND || code == UMIN)
          && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
        {
          cond0 = XEXP (XEXP (x, 0), 0);
          cond1 = XEXP (XEXP (x, 1), 0);

          if (COMPARISON_P (cond0)
              && COMPARISON_P (cond1)
              && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
                   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
                   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
                  || ((swap_condition (GET_CODE (cond0))
                       == reversed_comparison_code (cond1, NULL))
                      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
                      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
              && ! side_effects_p (x))
            {
              *ptrue = *pfalse = const0_rtx;
              return cond0;
            }
        }
    }
  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
         canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
        return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
        {
          *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
          return XEXP (cond0, 0);
        }
    }
  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
           && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
                                          &false0)) != 0)
    {
      true0 = simplify_gen_subreg (mode, true0,
                                   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
                                    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
        {
          *ptrue = true0;
          *pfalse = false0;
          return cond0;
        }
    }

  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
           || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
           || (is_a <scalar_int_mode> (mode, &int_mode)
               && (num_sign_bit_copies (x, int_mode)
                   == GET_MODE_PRECISION (int_mode))))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
           && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
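/* Usage sketch (added for exposition; the caller shown is hypothetical):

     rtx t, f;
     rtx cond = if_then_else_cond (x, &t, &f);
     if (cond != 0 && !rtx_equal_p (t, f))
       ...rewrite X in terms of (if_then_else COND T F)...

   A zero return leaves *PTRUE == *PFALSE == X, so callers must test the
   return value before treating the two arms as distinct.  */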
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  rtx temp;
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
        return XEXP (x, 0);
      case LT:  case LE:
        return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
                                   XEXP (x, 0),
                                   GET_MODE (XEXP (x, 0)));
      default:
        break;
      }
  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
        {
          std::swap (val, reg);
          cond = swap_condition (cond);
        }

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
        {
          if (COMPARISON_P (x))
            {
              if (comparison_dominates_p (cond, code))
                return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;

              code = reversed_comparison_code (x, NULL);
              if (code != UNKNOWN
                  && comparison_dominates_p (cond, code))
                return CONST0_RTX (GET_MODE (x));
              else
                return x;
            }
          else if (code == SMAX || code == SMIN
                   || code == UMIN || code == UMAX)
            {
              int unsignedp = (code == UMIN || code == UMAX);

              /* Do not reverse the condition when it is NE or EQ.
                 This is because we cannot conclude anything about
                 the value of 'SMAX (x, y)' when x is not equal to y,
                 but we can when x equals y.  */
              if ((code == SMAX || code == UMAX)
                  && ! (cond == EQ || cond == NE))
                cond = reverse_condition (cond);

              switch (cond)
                {
                case GE:   case GT:
                  return unsignedp ? x : XEXP (x, 1);
                case LE:   case LT:
                  return unsignedp ? x : XEXP (x, 0);
                case GEU:  case GTU:
                  return unsignedp ? XEXP (x, 1) : x;
                case LEU:  case LTU:
                  return unsignedp ? XEXP (x, 0) : x;
                default:
                  break;
                }
            }
        }
    }
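/* Worked example (added for exposition): if COND records that
   reg < val holds (signed LT), the MIN/MAX arm above leaves SMIN alone
   and reverses the condition for SMAX, so (smin reg val) collapses to
   reg and (smax reg val) collapses to val.  */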
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
        {
          /* We must simplify subreg here, before we lose track of the
             original inner_mode.  */
          new_rtx = simplify_subreg (GET_MODE (x), r,
                                     inner_mode, SUBREG_BYTE (x));
          if (new_rtx)
            return new_rtx;
          else
            SUBST (SUBREG_REG (x), r);
        }

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
        {
          /* We must simplify the zero_extend here, before we lose
             track of the original inner_mode.  */
          new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                              r, inner_mode);
          if (new_rtx)
            return new_rtx;
          else
            SUBST (XEXP (x, 0), r);
        }

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
                                                cond, reg, val));
    }

  return x;
}
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
        return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
        return 0;
      x = adjust_address_nv (x, GET_MODE (y),
                             byte_lowpart_offset (GET_MODE (y),
                                                  GET_MODE (x)));
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
                      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
                      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && partial_subreg_p (XEXP (src, 0))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
                                XEXP (SUBREG_REG (XEXP (src, 0)), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const1_rtx);
      return x;
    }
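  /* Worked example (added for exposition): the C statement
     "flags |= 1 << n" expands to (ior (ashift (const_int 1) n) flags),
     which the test above rewrites as

         (set (zero_extract flags (const_int 1) n) (const_int 1))

     turning the read-modify-write into a single bit-field store.  */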
  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
        ze_mask = -1;
      else
        ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
        return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
        {
          src = gen_rtx_AND (mode, XEXP (src, 0),
                             gen_int_mode (and_mask & ze_mask, mode));
          return gen_rtx_SET (dest, src);
        }
    }
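  /* Worked example (added for exposition): for an 8-bit field,
     ze_mask == 0xff.  An AND mask of 0x1ff covers the whole field, so
     the AND is dropped; a mask of 0x13f is trimmed to 0x3f, since bits
     outside the field are never stored anyway.  */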
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
           && paradoxical_subreg_p (XEXP (rhs, 0))
           && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
           && CONST_INT_P (XEXP (rhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
           && paradoxical_subreg_p (XEXP (lhs, 0))
           && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;
  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
                           ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                     src_mode, other, pos),
                               dest);
  src = force_to_mode (src, new_mode,
                       len >= HOST_BITS_PER_WIDE_INT
                       ? HOST_WIDE_INT_M1U
                       : (HOST_WIDE_INT_1U << len) - 1,
                       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
         == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
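/* Worked example (added for exposition): for
   (set R (ior (and R (const_int -256)) B)) where the nonzero bits of B
   lie within 0xff, C1 == -256 yields pos == 0 and len == 8, so the SET
   becomes (set (zero_extract R (const_int 8) (const_int 0)) B') with B'
   shifted into place and forced to the field's mode.  */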
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
        return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
        return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
                              apply_distributive_law (tem), other);
}
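/* Worked example (added for exposition): for
   X == (ior (and A C) (and B C)) we have code == IOR and
   inner_code == AND, LHS/RHS become A and B with OTHER == C, and the
   result is (and (ior A B) C).  The recursive call on TEM lets chains
   such as (ior (and (and A D) C) (and (and B D) C)) keep folding.  */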
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and A (not B)) (and B (not B))),
   which then simplifies to (and A (not B)).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR, inner_code = XOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
                                                     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
          < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
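/* Usage note (added for exposition): for X == (and (ior A B) C) and
   N == 0 this forms

     tmp = apply_distributive_law ((ior (and A C) (and B C)))

   and returns TMP only when its outermost code differs from the
   original AND and it is strictly cheaper by set_src_cost; otherwise
   NULL_RTX is returned and X is left untouched.  */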
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
                          unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0 && !side_effects_p (varop))
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
        gen_lowpart
          (mode,
           apply_distributive_law
           (simplify_gen_binary (GET_CODE (varop), varop_mode,
                                 simplify_and_const_int (NULL_RTX, varop_mode,
                                                         XEXP (varop, 0),
                                                         constop),
                                 simplify_and_const_int (NULL_RTX, varop_mode,
                                                         XEXP (varop, 1),
                                                         constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
        return o1;
      if (o1 == const0_rtx)
        return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
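/* Worked example (added for exposition): in QImode with
   VAROP == (neg X), nonzero_bits (X) == 1 and CONSTOP == 4, the value
   of (neg X) is 0 or -1, so (and (neg X) 4) has bit 2 set exactly when
   X == 1 and the NEG branch above rewrites it as (ashift X 2).  */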
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
                        unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
                             gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.
   We DO care about all the bits in MODE, even if XMODE is smaller than MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
                              scalar_int_mode mode,
                              unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
              && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
         of mode, record_value_for_reg invoked nonzero_bits on the register
         with nonzero_bits_mode (because last_set_mode is necessarily integral
         and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
         are all valid, hence in mode too since nonzero_bits_mode is defined
         to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
        tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
        /* We don't know anything about the upper bits.  */
        mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);

      *nonzero &= mask;
    }

  return NULL;
}
/* Given a reg X of mode XMODE, return the number of bits at the high-order
   end of X that are known to be equal to the sign bit.  X will be used
   in mode MODE; the returned value will always be between 1 and the
   number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
                                     scalar_int_mode mode,
                                     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
          ? (is_a <scalar_int_mode> (mode, &int_mode)
             && HWI_COMPUTABLE_MODE_P (int_mode)
             ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
                               - floor_log2 (nonzero_bits (x, int_mode)))
             : 0)
          : num_sign_bit_copies (x, mode) - 1);
}
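/* Example (added for exposition): if X is SImode and
   nonzero_bits (X) == 0xff, the unsigned count is
   31 - floor_log2 (0xff) == 24 spare high-order zero bits; the sum of
   two such values still fits, so their addition cannot overflow.  */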
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only
   NEG, PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET
   if the proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
                 enum rtx_code op1, HOST_WIDE_INT const1,
                 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
     if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
        {
        case AND:
          const0 &= const1;
          break;
        case IOR:
          const0 |= const1;
          break;
        case XOR:
          const0 ^= const1;
          break;
        case PLUS:
          const0 += const1;
          break;
        case NEG:
          op0 = UNKNOWN;
          break;
        default:
          break;
        }
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
        if (op1 == AND)
          /* (a & b) | b == b */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) | b == a | b */
          ;
        break;

      case XOR:
        if (op1 == AND)
          /* (a & b) ^ b == (~a) & b */
          op0 = AND, *pcomp_p = 1;
        else /* op1 == IOR */
          /* (a | b) ^ b == a & ~b */
          op0 = AND, const0 = ~const0;
        break;

      case AND:
        if (op1 == IOR)
          /* (a | b) & b == b */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) & b == (~a) & b */
          *pcomp_p = 1;
        break;
      default:
        break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
           && op0 == AND)
    op0 = UNKNOWN;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  *pop0 = op0;
  return 1;
}
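/* Worked example (added for exposition): if the queued outer operation
   is *POP0 == IOR with *PCONST0 == 0x0c and we also need OP1 == IOR
   with CONST1 == 0x03, the op0 == op1 branch folds the two into a
   single IOR with constant 0x0f, so only one outer operation remains
   queued.  */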
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static scalar_int_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
                      scalar_int_mode orig_mode, scalar_int_mode mode,
                      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
         to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
          > (unsigned) (GET_MODE_PRECISION (mode)
                        - GET_MODE_PRECISION (orig_mode)))
        return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
        return mode;

      /* We can also widen if the bits brought in will be masked off.  This
         operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
        {
          int care_bits = low_bitmask_len (orig_mode, outer_const);

          if (care_bits >= 0
              && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
            return mode;
        }
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
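/* Example (added for exposition): a QImode LSHIFTRT of OP may be
   widened to SImode when nonzero_bits (OP, SImode) has no bits set
   above bit 7: the bits shifted in from above are already zero, so the
   wider shift produces the same low-order byte.  */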
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
                        rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count, log2;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
        return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
        {
          unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
          code = ROTATE;
          count = bitsize - count;
        }
      shift_mode = result_mode;
      if (shift_mode != mode)
        {
          /* We only change the modes of scalar shifts.  */
          int_mode = as_a <scalar_int_mode> (mode);
          int_result_mode = as_a <scalar_int_mode> (result_mode);
          shift_mode = try_widen_shift_mode (code, varop, count,
                                             int_result_mode, int_mode,
                                             outer_op, outer_const);
        }

      scalar_int_mode shift_unit_mode
        = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));

      /* Handle cases where the count is greater than the size of the mode
         minus 1.  For ASHIFT, use the size minus one as the count (this can
         occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
         take the count modulo the size.  For other shifts, the result is
         zero.

         Since these shifts are being produced by the compiler by combining
         multiple operations, each of which are defined, we know what the
         result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
        {
          if (code == ASHIFTRT)
            count = GET_MODE_PRECISION (shift_unit_mode) - 1;
          else if (code == ROTATE || code == ROTATERT)
            count %= GET_MODE_PRECISION (shift_unit_mode);
          else
            {
              /* We can't simply return zero because there may be an
                 outer op.  */
              varop = const0_rtx;
              count = 0;
              break;
            }
        }

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
         here would cause an infinite loop.  */
      if (complement_p)
        break;

      if (shift_mode == shift_unit_mode)
        {
          /* An arithmetic right shift of a quantity known to be -1 or 0
             is a no-op.  */
          if (code == ASHIFTRT
              && (num_sign_bit_copies (varop, shift_unit_mode)
                  == GET_MODE_PRECISION (shift_unit_mode)))
            {
              count = 0;
              break;
            }

          /* If we are doing an arithmetic right shift and discarding all but
             the sign bit copies, this is equivalent to doing a shift by the
             bitsize minus one.  Convert it into that shift because it will
             often allow other simplifications.  */

          if (code == ASHIFTRT
              && (count + num_sign_bit_copies (varop, shift_unit_mode)
                  >= GET_MODE_PRECISION (shift_unit_mode)))
            count = GET_MODE_PRECISION (shift_unit_mode) - 1;

          /* We simplify the tests below and elsewhere by converting
             ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
             `make_compound_operation' will convert it to an ASHIFTRT for
             those machines (such as VAX) that don't have an LSHIFTRT.  */
          if (code == ASHIFTRT
              && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
              && val_signbit_known_clear_p (shift_unit_mode,
                                            nonzero_bits (varop,
                                                          shift_unit_mode)))
            code = LSHIFTRT;

          if (((code == LSHIFTRT
                && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
                && !(nonzero_bits (varop, shift_unit_mode) >> count))
               || (code == ASHIFT
                   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
                   && !((nonzero_bits (varop, shift_unit_mode) << count)
                        & GET_MODE_MASK (shift_unit_mode))))
              && !side_effects_p (varop))
            varop = const0_rtx;
        }

      switch (GET_CODE (varop))
        {
        case SIGN_EXTEND:
        case SIGN_EXTRACT:
        case ZERO_EXTEND:
        case ZERO_EXTRACT:
          new_rtx = expand_compound_operation (varop);
          if (new_rtx != varop)
            {
              varop = new_rtx;
              continue;
            }
          break;

        case MEM:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_mode = as_a <scalar_int_mode> (mode);

          /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
             minus the width of a smaller mode, we can do this with a
             SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
          if ((code == ASHIFTRT || code == LSHIFTRT)
              && ! mode_dependent_address_p (XEXP (varop, 0),
                                             MEM_ADDR_SPACE (varop))
              && ! MEM_VOLATILE_P (varop)
              && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
                  .exists (&tmode)))
            {
              new_rtx = adjust_address_nv (varop, tmode,
                                           BYTES_BIG_ENDIAN ? 0
                                           : count / BITS_PER_UNIT);
              varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
                                     : ZERO_EXTEND, int_mode, new_rtx);
              count = 0;
              continue;
            }
          break;
        case SUBREG:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_mode = as_a <scalar_int_mode> (mode);
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

          /* If VAROP is a SUBREG, strip it as long as the inner operand has
             the same number of words as what we've seen so far.  Then store
             the widest mode in MODE.  */
          if (subreg_lowpart_p (varop)
              && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
              && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
              && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
                  == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
              && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
            {
              varop = SUBREG_REG (varop);
              if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
                mode = inner_mode;
              continue;
            }
          break;
        case MULT:
          /* Some machines use MULT instead of ASHIFT because MULT
             is cheaper.  But it is still better on those machines to
             merge two shifts into one.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
            {
              rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
              varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
                                           XEXP (varop, 0), log2_rtx);
              continue;
            }
          break;

        case UDIV:
          /* Similar, for when divides are cheaper.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
            {
              rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
              varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
                                           XEXP (varop, 0), log2_rtx);
              continue;
            }
          break;

        case ASHIFTRT:
          /* If we are extracting just the sign bit of an arithmetic
             right shift, that shift is not needed.  However, the sign
             bit of a wider mode may be different from what would be
             interpreted as the sign bit in a narrower mode, so, if
             the result is narrower, don't discard the shift.  */
          if (code == LSHIFTRT
              && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
              && (GET_MODE_UNIT_BITSIZE (result_mode)
                  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* fall through */

        case LSHIFTRT:
        case ASHIFT:
        case ROTATE:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_mode = as_a <scalar_int_mode> (mode);
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
          int_result_mode = as_a <scalar_int_mode> (result_mode);

          /* Here we have two nested shifts.  The result is usually the
             AND of a new shift with a mask.  We compute the result below.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && INTVAL (XEXP (varop, 1)) >= 0
              && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
              && HWI_COMPUTABLE_MODE_P (int_result_mode)
              && HWI_COMPUTABLE_MODE_P (int_mode))
            {
              enum rtx_code first_code = GET_CODE (varop);
              unsigned int first_count = INTVAL (XEXP (varop, 1));
              unsigned HOST_WIDE_INT mask;
              rtx mask_rtx;

              /* We have one common special case.  We can't do any merging if
                 the inner code is an ASHIFTRT of a smaller mode.  However, if
                 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
                 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
                 we can convert it to
                 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
                 This simplifies certain SIGN_EXTEND operations.  */
              if (code == ASHIFT && first_code == ASHIFTRT
                  && count == (GET_MODE_PRECISION (int_result_mode)
                               - GET_MODE_PRECISION (int_varop_mode)))
                {
                  /* C3 has the low-order C1 bits zero.  */

                  mask = GET_MODE_MASK (int_mode)
                         & ~((HOST_WIDE_INT_1U << first_count) - 1);

                  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
                                                  XEXP (varop, 0), mask);
                  varop = simplify_shift_const (NULL_RTX, ASHIFT,
                                                int_result_mode, varop, count);
                  count = first_count;
                  code = ASHIFTRT;
                  continue;
                }
              /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
                 than C1 high-order bits equal to the sign bit, we can convert
                 this to either an ASHIFT or an ASHIFTRT depending on the
                 two counts.

                 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */

              if (code == ASHIFTRT && first_code == ASHIFT
                  && int_varop_mode == shift_unit_mode
                  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
                      > first_count))
                {
                  varop = XEXP (varop, 0);
                  count -= first_count;
                  if (count < 0)
                    {
                      count = -count;
                      code = ASHIFT;
                    }

                  continue;
                }

              /* There are some cases we can't do.  If CODE is ASHIFTRT,
                 we can only do this if FIRST_CODE is also ASHIFTRT.

                 We can't do the case when CODE is ROTATE and FIRST_CODE is
                 ASHIFTRT.

                 If the mode of this shift is not the mode of the outer shift,
                 we can't do this if either shift is a right shift or ROTATE.

                 Finally, we can't do any of these if the mode is too wide
                 unless the codes are the same.

                 Handle the case where the shift codes are the same
                 first.  */

              if (code == first_code)
                {
                  if (int_varop_mode != int_result_mode
                      && (code == ASHIFTRT || code == LSHIFTRT
                          || code == ROTATE))
                    break;

                  count += first_count;
                  varop = XEXP (varop, 0);
                  continue;
                }

              if (code == ASHIFTRT
                  || (code == ROTATE && first_code == ASHIFTRT)
                  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
                  || (int_varop_mode != int_result_mode
                      && (first_code == ASHIFTRT || first_code == LSHIFTRT
                          || first_code == ROTATE
                          || code == ROTATE)))
                break;

              /* To compute the mask to apply after the shift, shift the
                 nonzero bits of the inner shift the same way the
                 outer shift will.  */

              mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
                                       int_result_mode);
              rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
              mask_rtx
                = simplify_const_binary_operation (code, int_result_mode,
                                                   mask_rtx, count_rtx);

              /* Give up if we can't compute an outer operation to use.  */
              if (mask_rtx == 0
                  || !CONST_INT_P (mask_rtx)
                  || ! merge_outer_ops (&outer_op, &outer_const, AND,
                                        INTVAL (mask_rtx),
                                        int_result_mode, &complement_p))
                break;

              /* If the shifts are in the same direction, we add the
                 counts.  Otherwise, we subtract them.  */
              if ((code == ASHIFTRT || code == LSHIFTRT)
                  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
                count += first_count;
              else
                count -= first_count;

              /* If COUNT is positive, the new shift is usually CODE,
                 except for the two exceptions below, in which case it is
                 FIRST_CODE.  If the count is negative, FIRST_CODE should
                 always be used.  */
              if (count > 0
                  && ((first_code == ROTATE && code == ASHIFT)
                      || (first_code == ASHIFTRT && code == LSHIFTRT)))
                code = first_code;
              else if (count < 0)
                code = first_code, count = -count;

              varop = XEXP (varop, 0);
              continue;
            }
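          /* Worked example (added for exposition): in QImode,
             (ashift (lshiftrt X 2) 3) reaches the merging code above
             with opposite directions, so the counts are subtracted to
             give (ashift X 1) while merge_outer_ops queues an outer AND
             with 0xf8, the nonzero bits of the inner shift moved left
             by three.  */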
          /* If we have (A << B << C) for any shift, we can convert this to
             (A << C << B).  This wins if A is a constant.  Only try this if
             B is not a constant.  */

          else if (GET_CODE (varop) == code
                   && CONST_INT_P (XEXP (varop, 0))
                   && !CONST_INT_P (XEXP (varop, 1)))
            {
              /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
                 sure the result will be masked.  See PR70222.  */
              if (code == LSHIFTRT
                  && int_mode != int_result_mode
                  && !merge_outer_ops (&outer_op, &outer_const, AND,
                                       GET_MODE_MASK (int_result_mode)
                                       >> orig_count, int_result_mode,
                                       &complement_p))
                break;
              /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
                 up outer sign extension (often left and right shift) is
                 hardly more efficient than the original.  See PR70429.  */
              if (code == ASHIFTRT && int_mode != int_result_mode)
                break;

              rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
              rtx new_rtx = simplify_const_binary_operation (code, int_mode,
                                                             XEXP (varop, 0),
                                                             count_rtx);
              varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
              count = 0;
              continue;
            }
          break;
        case NOT:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* Make this fit the case below.  */
          varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
          continue;
        case IOR:
        case AND:
        case XOR:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
          int_result_mode = as_a <scalar_int_mode> (result_mode);

          /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have an (le X 0) operation.  If we have an arithmetic shift
             and STORE_FLAG_VALUE is 1 or we have a logical shift with
             STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

          if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
              && XEXP (XEXP (varop, 0), 1) == constm1_rtx
              && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (int_varop_mode, varop);

              continue;
            }

          /* If we have (shift (logical)), move the logical to the outside
             to allow it to possibly combine with another logical and the
             shift to combine with another shift.  This also canonicalizes to
             what a ZERO_EXTRACT looks like.  Also, some machines have
             (and (shift)) insns.  */

          if (CONST_INT_P (XEXP (varop, 1))
              /* We can't do this if we have (ashiftrt (xor)) and the
                 constant has its sign bit set in shift_unit_mode with
                 shift_unit_mode wider than result_mode.  */
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && int_result_mode != shift_unit_mode
                   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                          shift_unit_mode) < 0)
              && (new_rtx = simplify_const_binary_operation
                  (code, int_result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
                   gen_int_shift_amount (int_result_mode, count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
                                  INTVAL (new_rtx), int_result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* If we can't do that, try to simplify the shift in each arm of the
             logical expression, make a new logical expression, and apply
             the inverse distributive law.  This also can't be done for
             (ashiftrt (xor)) where we've widened the shift and the constant
             changes the sign bit.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && int_result_mode != shift_unit_mode
                   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                          shift_unit_mode) < 0))
            {
              rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
                                              XEXP (varop, 0), count);
              rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
                                              XEXP (varop, 1), count);

              varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
                                           lhs, rhs);
              varop = apply_distributive_law (varop);

              count = 0;
              continue;
            }
          break;
        case EQ:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;
          int_result_mode = as_a <scalar_int_mode> (result_mode);

          /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
             says that the sign bit can be tested, FOO has mode MODE, C is
             GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
             that may be nonzero.  */
          if (code == LSHIFTRT
              && XEXP (varop, 1) == const0_rtx
              && GET_MODE (XEXP (varop, 0)) == int_result_mode
              && count == (GET_MODE_PRECISION (int_result_mode) - 1)
              && HWI_COMPUTABLE_MODE_P (int_result_mode)
              && STORE_FLAG_VALUE == -1
              && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
              && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
                                  int_result_mode, &complement_p))
            {
              varop = XEXP (varop, 0);
              count = 0;
              continue;
            }
          break;
	case NEG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
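	  /* Checking the two possible values in QImode: A = 1 gives
	     (neg 1) = 0xff, and 0xff >> 7 = 1 = A; A = 0 gives
	     (neg 0) = 0, and 0 >> 7 = 0 = A.  */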
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
				  int_result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;
	case PLUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
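	  /* Again checking both values in QImode: A = 1 gives
	     (plus 1 -1) = 0, and 0 >> 7 = 0 = (xor 1 1); A = 0 gives
	     (plus 0 -1) = 0xff, and 0xff >> 7 = 1 = (xor 0 1).  */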
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
		       >> count) == 0
		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
		       & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }

	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
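	  /* Here C' is C shifted left by N; for example,
	     (ashift (plus X 3) 2) computes (X + 3) << 2, which equals
	     (X << 2) + 12 modulo the mode, i.e. (plus (ashift X 2) 12),
	     so the PLUS of the shifted constant can move outside.  */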
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;
	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */
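	  /* Concretely, in QImode with STORE_FLAG_VALUE == 1, so the shift
	     is logical and C == 7:

		X = 5:   (minus (ashiftrt 5 7) 5)   = 0xfb -> 0xfb >> 7 = 1 = (gt 5 0)
		X = 0:   (minus (ashiftrt 0 7) 0)   = 0    -> 0 >> 7    = 0 = (gt 0 0)
		X = -3:  (minus (ashiftrt -3 7) -3) = 2    -> 2 >> 7    = 0 = (gt -3 0)  */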
	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }
	  break;
	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
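	  /* For example, (lshiftrt:QI (truncate:QI (lshiftrt:SI X 24)) 2)
	     selects bits 26..31 of X, and so does
	     (truncate:QI (lshiftrt:SI X 26)); the rewrite is valid here
	     because the inner count 24 is at least 32 - 8 = 24, so no bit
	     surviving the truncation depends on the truncated ones.  */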
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);
	      int new_count = count + INTVAL (XEXP (varop_inner, 1));
	      rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
							new_count);
	      varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
					      XEXP (varop_inner, 0),
					      new_count_rtx);
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
    }
  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop,
			     gen_int_shift_amount (shift_mode, count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    /* We only change the modes of scalar shifts.  */
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
				x, GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
      else
	x = simplify_gen_binary (outer_op, int_result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop,
			     gen_int_shift_amount (GET_MODE (varop), count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */
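/* As an illustration of the SET_SRC rewrite below (with !BITS_BIG_ENDIAN,
   so no bit renumbering is needed), extracting 8 bits starting at bit 4,

       (zero_extract:SI X (const_int 8) (const_int 4)),

   becomes a shift followed by a mask,

       (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)).  */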
static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
				  gen_int_shift_amount (inner_mode, start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
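/* The SET_DEST rewrite above has the same flavor: a bit-field store such as

       (set (zero_extract:SI R (const_int 8) (const_int 4)) SRC)

   becomes a read-modify-write of the whole register,

       (set R (ior:SI (and:SI R (const_int -4081))
		      (and:SI (ashift:SI SRC (const_int 4))
			      (const_int 4080)))),

   since 4080 == 0xff0 masks the field and -4081 == ~0xff0 keeps the
   surrounding bits.  */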
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
      && ! (CONST_SCALAR_INT_P (x)
	    || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);

      poly_int64 offset = byte_lowpart_offset (omode, imode);
      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == ((HOST_WIDE_INT_1U
			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == (HOST_WIDE_INT_1U
		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
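/* Some examples of the canonicalizations performed above: (lt X 5) becomes
   (le X 4); in SImode, (ltu X 0x80000000) becomes (ge X 0) and
   (gtu X 0x7fffffff) becomes (lt X 0); and if OP0 is known to have only
   bit 3 possibly nonzero, (eq OP0 8) becomes (ne OP0 0).  */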
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;
  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
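      /* For example, if A and B are both known to be multiples of 4, so
	 that the two low-order bits of each are zero, then
	 (eq (lshiftrt A 2) (lshiftrt B 2)) can be treated as (eq A B):
	 the bits discarded by the shift are equal (both zero) on the two
	 sides.  */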
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					GET_MODE (SUBREG_REG (inner_op0)))) == 0
	      && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					GET_MODE (SUBREG_REG (inner_op1)))) == 0)
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode,
				 as_a <scalar_int_mode> (GET_MODE (op0)))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}
      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }
  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
	  && ! (raw_mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	op0 = force_to_mode (op0, int_mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (int_mode) - 1),
			     0);

      if (COMPARISON_P (op0))
	{
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;
	}

      if (raw_mode == VOIDmode)
	break;
      scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);

      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
12412 can perform the inverse operation on our RHS. */
12413 if (equality_comparison_p
&& CONST_INT_P (XEXP (op0
, 1))
12414 && (tem
= simplify_binary_operation (ROTATERT
, mode
,
12415 op1
, XEXP (op0
, 1))) != 0)
12417 op0
= XEXP (op0
, 0);
12422 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12423 a particular bit. Convert it to an AND of a constant of that
12424 bit. This will be converted into a ZERO_EXTRACT. */
12425 if (const_op
== 0 && sign_bit_comparison_p
12426 && CONST_INT_P (XEXP (op0
, 1))
12427 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
12429 op0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (op0
, 0),
12432 - INTVAL (XEXP (op0
, 1)))));
12433 code
= (code
== LT
? NE
: EQ
);
12437 /* Fall through. */
12440 /* ABS is ignorable inside an equality comparison with zero. */
12441 if (const_op
== 0 && equality_comparison_p
)
12443 op0
= XEXP (op0
, 0);
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
					 &inner_mode)
	      && GET_MODE_PRECISION (inner_mode) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && (((nonzero_bits (a, inner_mode)
			 & ~GET_MODE_MASK (mode)) == 0
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }

	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (mode) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (mode)) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* FALLTHROUGH */

	case ZERO_EXTEND:
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;

	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (PLUS, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   XEXP (op0, 0), op1)) != 0)
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (XOR, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;

	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }

	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	     -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND does no longer mask the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;

	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* FALLTHROUGH */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			      << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outmost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
     care bits and we can assume they have any convenient value.  So
     making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
     In this case the upper bits of op0 are undefined.  We should not make
     the simplification in that case as we do not know the contents of
     those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
     In that case we know those bits are zeros or ones.  We must also be
     sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
        {
          /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
             implemented.  */
          if (REG_P (SUBREG_REG (op0)))
            {
              op0 = SUBREG_REG (op0);
              op1 = gen_lowpart (inner_mode, op1);
            }
        }
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
               && (nonzero_bits (SUBREG_REG (op0), inner_mode)
                   & ~GET_MODE_MASK (mode)) == 0)
        {
          tem = gen_lowpart (inner_mode, op1);

          if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
            op0 = SUBREG_REG (op0), op1 = tem;
        }
    }
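
  /* A worked example (illustrative only, with hypothetical registers):
     the non-paradoxical branch above lets a lowpart SUBREG be dropped for
     an equality test, e.g.

         (eq (subreg:QI (reg:SI 100) 0) (const_int 5))

     can be tested as (eq (reg:SI 100) (const_int 5)) once nonzero_bits
     shows that neither operand has bits set outside the QImode mask.  */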
  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
        tmode = tmode_iter.require ();
        if (!HWI_COMPUTABLE_MODE_P (tmode))
          break;
        if (have_insn_for (COMPARE, tmode))
          {
            int zero_extended;

            /* If this is a test for negative, we can make an explicit
               test of the sign bit.  Test this first so we can use
               a paradoxical subreg to extend OP0.  */

            if (op1 == const0_rtx && (code == LT || code == GE)
                && HWI_COMPUTABLE_MODE_P (mode))
              {
                unsigned HOST_WIDE_INT sign
                  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
                op0 = simplify_gen_binary (AND, tmode,
                                           gen_lowpart (tmode, op0),
                                           gen_int_mode (sign, tmode));
                code = (code == LT) ? NE : EQ;
                break;
              }

            /* If the only nonzero bits in OP0 and OP1 are those in the
               narrower mode and this is an equality or unsigned comparison,
               we can use the wider mode.  Similarly for sign-extended
               values, in which case it is true for all comparisons.  */
            zero_extended = ((code == EQ || code == NE
                              || code == GEU || code == GTU
                              || code == LEU || code == LTU)
                             && (nonzero_bits (op0, tmode)
                                 & ~GET_MODE_MASK (mode)) == 0
                             && ((CONST_INT_P (op1)
                                  || (nonzero_bits (op1, tmode)
                                      & ~GET_MODE_MASK (mode)) == 0)));

            if (zero_extended
                || ((num_sign_bit_copies (op0, tmode)
                     > (unsigned int) (GET_MODE_PRECISION (tmode)
                                       - GET_MODE_PRECISION (mode)))
                    && (num_sign_bit_copies (op1, tmode)
                        > (unsigned int) (GET_MODE_PRECISION (tmode)
                                          - GET_MODE_PRECISION (mode)))))
              {
                /* If OP0 is an AND and we don't have an AND in MODE either,
                   make a new AND in the proper mode.  */
                if (GET_CODE (op0) == AND
                    && !have_insn_for (AND, mode))
                  op0 = simplify_gen_binary (AND, tmode,
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 0)),
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 1)));

                if (zero_extended)
                  {
                    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                              op0, mode);
                    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                              op1, mode);
                  }
                else
                  {
                    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                              op0, mode);
                    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                              op1, mode);
                  }
                break;
              }
          }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
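
/* A worked example (illustrative only, with hypothetical registers): on a
   target with no QImode compare insn, the sign-bit test above lets

       (lt (reg:QI 100) (const_int 0))

   be tested as

       (ne (and:SI (subreg:SI (reg:QI 100) 0) (const_int 128)) (const_int 0))

   where 128 is the QImode sign bit and SImode is a wider mode that does
   have a compare insn.  */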
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
        return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return 2 + 2 * count_rtxs (x0)
               + count_rtxs (x == XEXP (x1, 0)
                             ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return 2 + 2 * count_rtxs (x1)
               + count_rtxs (x == XEXP (x0, 0)
                             ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
        {
          reg_stat_type *rsp = &reg_stat[r];
          rsp->last_set_table_tick = label_tick;
        }

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        /* Check for identical subexpressions.  If x contains
           identical subexpression we only have to traverse one of
           them.  */
        if (i == 0 && ARITHMETIC_P (x))
          {
            /* Note that at this point x1 has already been
               processed.  */
            rtx x0 = XEXP (x, 0);
            rtx x1 = XEXP (x, 1);

            /* If x0 and x1 are identical then there is no need to
               process x0.  */
            if (x0 == x1)
              break;

            /* If x0 is identical to a subexpression of x1 then while
               processing x1, x0 has already been processed.  Thus we
               are done with x.  */
            if (ARITHMETIC_P (x1)
                && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
              break;

            /* If x1 is identical to a subexpression of x0 then we
               still have to process the rest of x0.  */
            if (ARITHMETIC_P (x0)
                && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
              {
                update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
                break;
              }
          }

        update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
         our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
         it isn't going to be useful and will take a lot of time to process,
         so just use the CLOBBER.  */

      if (tem)
        {
          if (ARITHMETIC_P (tem)
              && GET_CODE (XEXP (tem, 0)) == CLOBBER
              && GET_CODE (XEXP (tem, 1)) == CLOBBER)
            tem = XEXP (tem, 0);
          else if (count_occurrences (value, reg, 1) >= 2)
            {
              /* If there are two or more occurrences of REG in VALUE,
                 prevent the value from growing too much.  */
              if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
                tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
            }

          value = replace_rtx (copy_rtx (value), reg, tem);
        }
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
        rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
          || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
        rsp->last_set_invalid = 1;
      else
        rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
        value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
          && HWI_COMPUTABLE_MODE_P (mode))
        mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
        = num_sign_bit_copies (value, GET_MODE (reg));
    }
}
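
/* A worked example (illustrative only, with hypothetical registers): for
   an increment like "x++", i.e.

       (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   VALUE mentions REG itself, so the code above first substitutes the last
   recorded value of (reg:SI 100) into VALUE, falling back to
   (clobber (const_int 0)) when no usable previous value is available.  */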
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
        record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
         show that we don't know the value.  We can handle a SUBREG if it's
         the low part, but we must be careful with paradoxical SUBREGs on
         RISC architectures because we cannot strip e.g. an extension around
         a load and record the naked load since the RTL middle-end considers
         that the upper bits are defined according to LOAD_EXTEND_OP.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
        record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
               && GET_CODE (SET_DEST (setter)) == SUBREG
               && SUBREG_REG (SET_DEST (setter)) == dest
               && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
                            BITS_PER_WORD)
               && subreg_lowpart_p (SET_DEST (setter)))
        record_value_for_reg (dest, record_dead_insn,
                              WORD_REGISTER_OPERATIONS
                              && word_register_operation_p (SET_SRC (setter))
                              && paradoxical_subreg_p (SET_DEST (setter))
                              ? SET_SRC (setter)
                              : gen_lowpart (GET_MODE (dest),
                                             SET_SRC (setter)));
      else if (GET_CODE (setter) == CLOBBER_HIGH)
        {
          reg_stat_type *rsp = &reg_stat[REGNO (dest)];
          if (rsp->last_set_value
              && reg_is_clobbered_by_clobber_high
                   (REGNO (dest), GET_MODE (rsp->last_set_value),
                    XEXP (setter, 0)))
            record_value_for_reg (dest, NULL, NULL_RTX);
        }
      else
        record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
          && REG_P (XEXP (link, 0)))
        {
          unsigned int regno = REGNO (XEXP (link, 0));
          unsigned int endregno = END_REGNO (XEXP (link, 0));

          for (i = regno; i < endregno; i++)
            {
              reg_stat_type *rsp;

              rsp = &reg_stat[i];
              rsp->last_death = insn;
            }
        }
      else if (REG_NOTE_KIND (link) == REG_INC)
        record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
        {
          reg_stat_type *rsp;

          rsp = &reg_stat[i];
          rsp->last_set_invalid = 1;
          rsp->last_set = insn;
          rsp->last_set_value = 0;
          rsp->last_set_mode = VOIDmode;
          rsp->last_set_nonzero_bits = 0;
          rsp->last_set_sign_bit_copies = 0;
          rsp->last_death = 0;
          rsp->truncated_to_mode = VOIDmode;
        }

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
         the return value register is set at this LUID.  We could
         still replace a register with the return value from the
         wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
          || REGNO (SET_DEST (set)) != regno
          || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
        {
          links = links->next;
          continue;
        }

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
        {
          if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
            rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
        }

      if (REG_P (SET_SRC (set)))
        {
          regno = REGNO (SET_SRC (set));
          links = LOG_LINKS (insn);
        }
      else
        break;
    }
}
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
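
/* A worked example (illustrative only, with a hypothetical register): if
   (reg:SI 100) has truncated_to_mode == QImode recorded in the current
   EBB, then (truncate:QI (reg:SI 100)) may be expressed as
   (subreg:QI (reg:SI 100) 0) even on targets where truncation is not a
   no-op in general.  */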
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
        return true;

      truncated_mode = GET_MODE (x);
      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
        {
          reg_stat_type *rsp = &reg_stat[r];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                         (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                          regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpression we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
                GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
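
/* A worked example (illustrative only, with a hypothetical register): once

       (set (reg:SI 100) (const_int 5))

   has been recorded, get_last_value (reg:SI 100) yields (const_int 5) for
   later insns in the same basic block, provided no later set has a LUID at
   or past subst_low_luid; parts of the value that have since become
   invalid come back as (clobber (const_int 0)) instead.  */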
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
static rtx reg_dead_reg;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  if (GET_CODE (x) == CLOBBER_HIGH
      && !reg_is_clobbered_by_clobber_high (reg_dead_reg, XEXP (x, 0)))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);
  reg_dead_reg = reg;

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

static rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
         FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
        {
          rtx_insn *insn = prev_real_nondebug_insn (to_insn);
          while (insn
                 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
                 && DF_INSN_LUID (insn) >= from_luid)
            {
              if (dead_or_set_regno_p (insn, regno))
                {
                  if (find_regno_note (insn, REG_DEAD, regno))
                    where_dead = insn;
                  break;
                }

              insn = prev_real_nondebug_insn (insn);
            }
        }

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && partial_subreg_p (GET_MODE (XEXP (note, 0)),
                                             GET_MODE (x))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some
         piece of every register in the expression is used by
         this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && !read_modify_subreg_p (dest)))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              poly_int64 old_size, args_size = get_args_size (note);
              /* fixup_args_size_notes looks at REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
                 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
              gcc_assert (maybe_ne (old_size, args_size)
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
        case REG_CALL_NOCF_CHECK:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;

        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if it is for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for register which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }

          /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
             but we can't tell which at this point.  We must reset any
             expectations we had about the value that was previously
             stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
             and, if appropriate, restore its previous value, but we
             don't have enough information for that at this point.  */
          else
            {
              record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

              /* Otherwise, if this register is now referenced in i2
                 then the register used to be modified in one of the
                 original insns.  If it was i3 (say, in an unused
                 parallel), it's now completely gone, so the note can
                 be discarded.  But if it was modified in i2, i1 or i0
                 and we still reference it in i2, then we're
                 referencing the previous value, and since the
                 register was modified and REG_UNUSED, we know that
                 the previous value is now dead.  So, if we only
                 reference the register in i2, we change the note to
                 REG_DEAD, to reflect the previous value.  However, if
                 we're also setting or clobbering the register as
                 scratch, we know (because the register was not
                 referenced in i3) that it's unused, just as it was
                 unused before, and we place the note in i2.  */
              if (from_insn != i3 && i2 && INSN_P (i2)
                  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                {
                  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
                    PUT_REG_NOTE_KIND (note, REG_DEAD);
                  if (! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i2, REG_NOTE_KIND (note),
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i2, REG_NOTE_KIND (note),
                                          XEXP (note, 0))))
                    place = i2;
                }
            }
          break;

        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They must
             be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             there.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;

        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                {
                  /* If the new I2 sets the same register that is marked
                     dead in the note, we do not in general know where to
                     put the note.  One important case we _can_ handle is
                     when the note comes from I3.  */
                  if (from_insn == i3)
                    place = i3;
                  else
                    break;
                }
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is
                     all TEM_INSN is doing.  If so, delete TEM_INSN.
                     Otherwise, make this into a REG_UNUSED note instead.
                     Don't delete sets to global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter
                                         = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn, NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          unsigned int regno = REGNO (XEXP (note, 0));
                          reg_stat_type *rsp = &reg_stat[regno];
                          if (rsp->last_set == tem_insn)
                            record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;

                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE,
                                                   XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not if it is totally replaced,
             which is what `dead_or_set_p' checks, so also check for it being
             set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.] */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs (i, reg_raw_mode[i]))
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
            added_notes_insn = place;
        }

      if (place2)
        {
          add_shallow_copy_of_reg_note (place2, note);

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
            added_notes_insn = place2;
        }
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      if (reg == pc_rtx)
        continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
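
/* A worked example (illustrative only, with hypothetical insns): if insn A
   sets (reg:SI 100) and the first insn after A that references
   (reg:SI 100) is insn B, the search above walks forward from the old link
   target to B and re-attaches the link there, so the first use again
   points back at its setter.  */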
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
   the reg-to-reg copy can usefully combine with later instructions, but we
   do not want to combine the hard reg into later instructions, for that
   restricts register allocation.  */
static void
make_more_copies (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          rtx set = single_set (insn);
          if (!set)
            continue;

          rtx dest = SET_DEST (set);
          if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
            continue;

          rtx src = SET_SRC (set);
          if (!(REG_P (src) && HARD_REGISTER_P (src)))
            continue;
          if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
            continue;

          rtx new_reg = gen_reg_rtx (GET_MODE (dest));
          rtx_insn *new_insn = gen_move_insn (new_reg, src);
          SET_SRC (set) = new_reg;
          emit_insn_before (new_insn, insn);
          df_insn_rescan (insn);
        }
    }
}
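
/* A worked example (illustrative only, with hypothetical registers): a
   hard-reg-to-pseudo copy

       (set (reg:SI 100) (reg:SI 0))

   becomes the pair

       (set (reg:SI 101) (reg:SI 0))      <- new_insn, emitted before
       (set (reg:SI 100) (reg:SI 101))    <- original insn, source replaced

   so later combines work with the pseudo-to-pseudo copy instead of the
   hard register.  */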
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  make_more_copies ();

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  int rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}