combine: Punt on out of range rotate counts [PR93505]
[gcc.git] / gcc / combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
23
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
29
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
35
36 LOG_LINKS does not have links for uses of CC0. It does not
37 need them, because the insn that sets CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with modified_between_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
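
/* Illustrative example, added as annotation and not part of the original
   file: given the two linked insns

     I2: (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
     I3: (set (mem:SI (reg:SI 100)) (reg:SI 102))

   where reg 100 dies in I3, the combiner substitutes I2 into I3, producing

     (set (mem:SI (plus:SI (reg:SI 101) (const_int 4))) (reg:SI 102))

   and, if the target recognizes the new pattern, deletes I2.  */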
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "expr.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
107 #include "function-abi.h"
108
109 /* Number of attempts to combine instructions in this function. */
110
111 static int combine_attempts;
112
113 /* Number of attempts that got as far as substitution in this function. */
114
115 static int combine_merges;
116
117 /* Number of instructions combined with added SETs in this function. */
118
119 static int combine_extras;
120
121 /* Number of instructions combined in this function. */
122
123 static int combine_successes;
124
125 /* Totals over entire compilation. */
126
127 static int total_attempts, total_merges, total_extras, total_successes;
128
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
135
136 static rtx_insn *i2mod;
137
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139
140 static rtx i2mod_old_rhs;
141
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143
144 static rtx i2mod_new_rhs;
145 \f
146 struct reg_stat_type {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx_insn *last_death;
149
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx_insn *last_set;
152
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
158
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
161
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
165
166 Therefore, we maintain the following fields:
167
168 last_set_value the last value assigned
169 last_set_label records the value of label_tick when the
170 register was assigned
171 last_set_table_tick records the value of label_tick when a
172 value using the register is assigned
173 last_set_invalid set to nonzero when it is not valid
174 to use the value of this register in some
175 register's value
176
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
181
182 (The next two parameters are out of date).
183
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
195
196 /* Record last value assigned to (hard or pseudo) register n. */
197
198 rtx last_set_value;
199
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
202
203 int last_set_table_tick;
204
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
207
208 int last_set_label;
209
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
213 sign bit copies it was known to have when it was last set. */
214
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
222
223 char last_set_invalid;
224
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
229
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
234
235 If an entry is zero, it means that we don't know anything special. */
236
237 unsigned char sign_bit_copies;
238
239 unsigned HOST_WIDE_INT nonzero_bits;
240
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
244
245 int truncation_label;
246
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
251
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
253 };
254
255
256 static vec<reg_stat_type> reg_stat;
257
258 /* One plus the highest pseudo for which we track REG_N_SETS.
259 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
260 but during combine_split_insns new pseudos can be created. As we don't have
261 updated DF information in that case, it is hard to initialize the array
262 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
263 so instead of growing the arrays, just assume all newly created pseudos
264 during combine might be set multiple times. */
265
266 static unsigned int reg_n_sets_max;
267
268 /* Record the luid of the last insn that invalidated memory
269 (anything that writes memory, and subroutine calls, but not pushes). */
270
271 static int mem_last_set;
272
273 /* Record the luid of the last CALL_INSN
274 so we can tell whether a potential combination crosses any calls. */
275
276 static int last_call_luid;
277
278 /* When `subst' is called, this is the insn that is being modified
279 (by combining in a previous insn). The PATTERN of this insn
280 is still the old pattern partially modified and it should not be
281 looked at, but this may be used to examine the successors of the insn
282 to judge whether a simplification is valid. */
283
284 static rtx_insn *subst_insn;
285
286 /* This is the lowest LUID that `subst' is currently dealing with.
287 get_last_value will not return a value if the register was set at or
288 after this LUID. If not for this mechanism, we could get confused if
289 I2 or I1 in try_combine were an insn that used the old value of a register
290 to obtain a new value. In that case, we might erroneously get the
291 new value of the register when we wanted the old one. */
292
293 static int subst_low_luid;
294
295 /* This contains any hard registers that are used in newpat; reg_dead_at_p
296 must consider all these registers to be always live. */
297
298 static HARD_REG_SET newpat_used_regs;
299
300 /* This is an insn to which a LOG_LINKS entry has been added. If this
301 insn is earlier than I2 or I3, combine should rescan starting at
302 that location. */
303
304 static rtx_insn *added_links_insn;
305
306 /* And similarly, for notes. */
307
308 static rtx_insn *added_notes_insn;
309
310 /* Basic block in which we are performing combines. */
311 static basic_block this_basic_block;
312 static bool optimize_this_for_speed_p;
313
314 \f
315 /* Length of the currently allocated uid_insn_cost array. */
316
317 static int max_uid_known;
318
319 /* The following array records the insn_cost for every insn
320 in the instruction stream. */
321
322 static int *uid_insn_cost;
323
324 /* The following array records the LOG_LINKS for every insn in the
325 instruction stream as struct insn_link pointers. */
326
327 struct insn_link {
328 rtx_insn *insn;
329 unsigned int regno;
330 struct insn_link *next;
331 };
332
333 static struct insn_link **uid_log_links;
334
335 static inline int
336 insn_uid_check (const_rtx insn)
337 {
338 int uid = INSN_UID (insn);
339 gcc_checking_assert (uid <= max_uid_known);
340 return uid;
341 }
342
343 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
344 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
345
346 #define FOR_EACH_LOG_LINK(L, INSN) \
347 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
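
/* A minimal usage sketch of the iterator above, added for illustration
   only (not part of the original file).  It reports whether any LOG_LINKS
   entry of INSN records a def of REGNO; both parameters are just example
   names.  */

static bool
example_log_link_for_regno_p (rtx_insn *insn, unsigned int regno)
{
  struct insn_link *link;
  FOR_EACH_LOG_LINK (link, insn)
    if (link->regno == regno)
      return true;
  return false;
}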
348
349 /* Links for LOG_LINKS are allocated from this obstack. */
350
351 static struct obstack insn_link_obstack;
352
353 /* Allocate a link. */
354
355 static inline struct insn_link *
356 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
357 {
358 struct insn_link *l
359 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
360 sizeof (struct insn_link));
361 l->insn = insn;
362 l->regno = regno;
363 l->next = next;
364 return l;
365 }
366
367 /* Incremented for each basic block. */
368
369 static int label_tick;
370
371 /* Reset to label_tick for each extended basic block in scanning order. */
372
373 static int label_tick_ebb_start;
374
375 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
376 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
377
378 static scalar_int_mode nonzero_bits_mode;
379
380 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
381 be safely used. It is zero while computing them and after combine has
382 completed. This former test prevents propagating values based on
383 previously set values, which can be incorrect if a variable is modified
384 in a loop. */
385
386 static int nonzero_sign_valid;
387
388 \f
389 /* Record one modification to rtl structure
390 to be undone by storing old_contents into *where. */
391
392 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
393
394 struct undo
395 {
396 struct undo *next;
397 enum undo_kind kind;
398 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
399 union { rtx *r; int *i; struct insn_link **l; } where;
400 };
401
402 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
403 num_undo says how many are currently recorded.
404
405 other_insn is nonzero if we have modified some other insn in the process
406 of working on subst_insn. It must be verified too. */
407
408 struct undobuf
409 {
410 struct undo *undos;
411 struct undo *frees;
412 rtx_insn *other_insn;
413 };
414
415 static struct undobuf undobuf;
416
417 /* Number of times the pseudo being substituted for
418 was found and replaced. */
419
420 static int n_occurrences;
421
422 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
423 scalar_int_mode,
424 unsigned HOST_WIDE_INT *);
425 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
426 scalar_int_mode,
427 unsigned int *);
428 static void do_SUBST (rtx *, rtx);
429 static void do_SUBST_INT (int *, int);
430 static void init_reg_last (void);
431 static void setup_incoming_promotions (rtx_insn *);
432 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
433 static int cant_combine_insn_p (rtx_insn *);
434 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 rtx_insn *, rtx_insn *, rtx *, rtx *);
436 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
437 static int contains_muldiv (rtx);
438 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
439 int *, rtx_insn *);
440 static void undo_all (void);
441 static void undo_commit (void);
442 static rtx *find_split_point (rtx *, rtx_insn *, bool);
443 static rtx subst (rtx, rtx, rtx, int, int, int);
444 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
445 static rtx simplify_if_then_else (rtx);
446 static rtx simplify_set (rtx);
447 static rtx simplify_logical (rtx);
448 static rtx expand_compound_operation (rtx);
449 static const_rtx expand_field_assignment (const_rtx);
450 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
451 rtx, unsigned HOST_WIDE_INT, int, int, int);
452 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
453 unsigned HOST_WIDE_INT *);
454 static rtx canon_reg_for_combine (rtx, rtx);
455 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
456 scalar_int_mode, unsigned HOST_WIDE_INT, int);
457 static rtx force_to_mode (rtx, machine_mode,
458 unsigned HOST_WIDE_INT, int);
459 static rtx if_then_else_cond (rtx, rtx *, rtx *);
460 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
461 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
462 static rtx make_field_assignment (rtx);
463 static rtx apply_distributive_law (rtx);
464 static rtx distribute_and_simplify_rtx (rtx, int);
465 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
466 unsigned HOST_WIDE_INT);
467 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
468 unsigned HOST_WIDE_INT);
469 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
470 HOST_WIDE_INT, machine_mode, int *);
471 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
472 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
473 int);
474 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
475 static rtx gen_lowpart_for_combine (machine_mode, rtx);
476 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
477 rtx, rtx *);
478 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
479 static void update_table_tick (rtx);
480 static void record_value_for_reg (rtx, rtx_insn *, rtx);
481 static void check_promoted_subreg (rtx_insn *, rtx);
482 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
483 static void record_dead_and_set_regs (rtx_insn *);
484 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
485 static rtx get_last_value (const_rtx);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
498 \f
499
500 /* It is not safe to use ordinary gen_lowpart in combine.
501 See comments in gen_lowpart_for_combine. */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
504
505 /* Our implementation of gen_lowpart never emits a new pseudo. */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
508
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
511
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
514
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
517
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
519
520 \f
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522 Target hooks cannot use enum rtx_code. */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 bool op0_preserve_value)
526 {
527 int code_int = (int)*code;
528 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529 *code = (enum rtx_code)code_int;
530 }
531
532 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
533 PATTERN cannot be split. Otherwise, it returns an insn sequence.
534 This is a wrapper around split_insns which ensures that the
535 reg_stat vector is made larger if the splitter creates a new
536 register. */
537
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx_insn *insn)
540 {
541 rtx_insn *ret;
542 unsigned int nregs;
543
544 ret = split_insns (pattern, insn);
545 nregs = max_reg_num ();
546 if (nregs > reg_stat.length ())
547 reg_stat.safe_grow_cleared (nregs);
548 return ret;
549 }
550
551 /* This is used by find_single_use to locate an rtx in LOC that
552 contains exactly one use of DEST, which is typically either a REG
553 or CC0. It returns a pointer to the innermost rtx expression
554 containing DEST. Appearances of DEST that are being used to
555 totally replace it are not counted. */
556
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
559 {
560 rtx x = *loc;
561 enum rtx_code code = GET_CODE (x);
562 rtx *result = NULL;
563 rtx *this_result;
564 int i;
565 const char *fmt;
566
567 switch (code)
568 {
569 case CONST:
570 case LABEL_REF:
571 case SYMBOL_REF:
572 CASE_CONST_ANY:
573 case CLOBBER:
574 return 0;
575
576 case SET:
577 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
578 of a REG that occupies all of the REG, the insn uses DEST if
579 it is mentioned in the destination or the source. Otherwise, we
580 need just check the source. */
581 if (GET_CODE (SET_DEST (x)) != CC0
582 && GET_CODE (SET_DEST (x)) != PC
583 && !REG_P (SET_DEST (x))
584 && ! (GET_CODE (SET_DEST (x)) == SUBREG
585 && REG_P (SUBREG_REG (SET_DEST (x)))
586 && !read_modify_subreg_p (SET_DEST (x))))
587 break;
588
589 return find_single_use_1 (dest, &SET_SRC (x));
590
591 case MEM:
592 case SUBREG:
593 return find_single_use_1 (dest, &XEXP (x, 0));
594
595 default:
596 break;
597 }
598
599 /* If it wasn't one of the common cases above, check each expression and
600 vector of this code. Look for a unique usage of DEST. */
601
602 fmt = GET_RTX_FORMAT (code);
603 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
604 {
605 if (fmt[i] == 'e')
606 {
607 if (dest == XEXP (x, i)
608 || (REG_P (dest) && REG_P (XEXP (x, i))
609 && REGNO (dest) == REGNO (XEXP (x, i))))
610 this_result = loc;
611 else
612 this_result = find_single_use_1 (dest, &XEXP (x, i));
613
614 if (result == NULL)
615 result = this_result;
616 else if (this_result)
617 /* Duplicate usage. */
618 return NULL;
619 }
620 else if (fmt[i] == 'E')
621 {
622 int j;
623
624 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
625 {
626 if (XVECEXP (x, i, j) == dest
627 || (REG_P (dest)
628 && REG_P (XVECEXP (x, i, j))
629 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
630 this_result = loc;
631 else
632 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
633
634 if (result == NULL)
635 result = this_result;
636 else if (this_result)
637 return NULL;
638 }
639 }
640 }
641
642 return result;
643 }
644
645
646 /* See if DEST, produced in INSN, is used only a single time in the
647 sequel. If so, return a pointer to the innermost rtx expression in which
648 it is used.
649
650 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
651
652 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
653 care about REG_DEAD notes or LOG_LINKS.
654
655 Otherwise, we find the single use by finding an insn that has a
656 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
657 only referenced once in that insn, we know that it must be the first
658 and last insn referencing DEST. */
659
660 static rtx *
661 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
662 {
663 basic_block bb;
664 rtx_insn *next;
665 rtx *result;
666 struct insn_link *link;
667
668 if (dest == cc0_rtx)
669 {
670 next = NEXT_INSN (insn);
671 if (next == 0
672 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
673 return 0;
674
675 result = find_single_use_1 (dest, &PATTERN (next));
676 if (result && ploc)
677 *ploc = next;
678 return result;
679 }
680
681 if (!REG_P (dest))
682 return 0;
683
684 bb = BLOCK_FOR_INSN (insn);
685 for (next = NEXT_INSN (insn);
686 next && BLOCK_FOR_INSN (next) == bb;
687 next = NEXT_INSN (next))
688 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
689 {
690 FOR_EACH_LOG_LINK (link, next)
691 if (link->insn == insn && link->regno == REGNO (dest))
692 break;
693
694 if (link)
695 {
696 result = find_single_use_1 (dest, &PATTERN (next));
697 if (ploc)
698 *ploc = next;
699 return result;
700 }
701 }
702
703 return 0;
704 }
705 \f
706 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
707 insn. The substitution can be undone by undo_all. If INTO is already
708 set to NEWVAL, do not record this change. Because computing NEWVAL might
709 also call SUBST, we have to compute it before we put anything into
710 the undo table. */
711
712 static void
713 do_SUBST (rtx *into, rtx newval)
714 {
715 struct undo *buf;
716 rtx oldval = *into;
717
718 if (oldval == newval)
719 return;
720
721 /* We'd like to catch as many invalid transformations here as
722 possible. Unfortunately, there are way too many mode changes
723 that are perfectly valid, so we'd waste too much effort for
724 little gain doing the checks here. Focus on catching invalid
725 transformations involving integer constants. */
726 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
727 && CONST_INT_P (newval))
728 {
729 /* Sanity check that we're replacing oldval with a CONST_INT
730 that is a valid sign-extension for the original mode. */
731 gcc_assert (INTVAL (newval)
732 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
733
734 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
735 CONST_INT is not valid, because after the replacement, the
736 original mode would be gone. Unfortunately, we can't tell
737 when do_SUBST is called to replace the operand thereof, so we
738 perform this test on oldval instead, checking whether an
739 invalid replacement took place before we got here. */
740 gcc_assert (!(GET_CODE (oldval) == SUBREG
741 && CONST_INT_P (SUBREG_REG (oldval))));
742 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
743 && CONST_INT_P (XEXP (oldval, 0))));
744 }
745
746 if (undobuf.frees)
747 buf = undobuf.frees, undobuf.frees = buf->next;
748 else
749 buf = XNEW (struct undo);
750
751 buf->kind = UNDO_RTX;
752 buf->where.r = into;
753 buf->old_contents.r = oldval;
754 *into = newval;
755
756 buf->next = undobuf.undos, undobuf.undos = buf;
757 }
758
759 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
760
761 /* Similar to SUBST, but NEWVAL is an int expression. Note that substituting
762 a HOST_WIDE_INT value (including CONST_INT) through this macro is
763 not safe. */
764
765 static void
766 do_SUBST_INT (int *into, int newval)
767 {
768 struct undo *buf;
769 int oldval = *into;
770
771 if (oldval == newval)
772 return;
773
774 if (undobuf.frees)
775 buf = undobuf.frees, undobuf.frees = buf->next;
776 else
777 buf = XNEW (struct undo);
778
779 buf->kind = UNDO_INT;
780 buf->where.i = into;
781 buf->old_contents.i = oldval;
782 *into = newval;
783
784 buf->next = undobuf.undos, undobuf.undos = buf;
785 }
786
787 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
788
789 /* Similar to SUBST, but just substitute the mode. This is used when
790 changing the mode of a pseudo-register, so that any other
791 references to the entry in the regno_reg_rtx array will change as
792 well. */
793
794 static void
795 do_SUBST_MODE (rtx *into, machine_mode newval)
796 {
797 struct undo *buf;
798 machine_mode oldval = GET_MODE (*into);
799
800 if (oldval == newval)
801 return;
802
803 if (undobuf.frees)
804 buf = undobuf.frees, undobuf.frees = buf->next;
805 else
806 buf = XNEW (struct undo);
807
808 buf->kind = UNDO_MODE;
809 buf->where.r = into;
810 buf->old_contents.m = oldval;
811 adjust_reg_mode (*into, newval);
812
813 buf->next = undobuf.undos, undobuf.undos = buf;
814 }
815
816 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
817
818 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
819
820 static void
821 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
822 {
823 struct undo *buf;
824 struct insn_link * oldval = *into;
825
826 if (oldval == newval)
827 return;
828
829 if (undobuf.frees)
830 buf = undobuf.frees, undobuf.frees = buf->next;
831 else
832 buf = XNEW (struct undo);
833
834 buf->kind = UNDO_LINKS;
835 buf->where.l = into;
836 buf->old_contents.l = oldval;
837 *into = newval;
838
839 buf->next = undobuf.undos, undobuf.undos = buf;
840 }
841
842 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
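
/* A rough sketch of how these macros are typically used, added for
   illustration only (not part of the original file; all names below are
   placeholders).  try_combine tentatively rewrites patterns with SUBST
   and friends; every change is recorded in undobuf, and the whole group
   is either committed or rolled back depending on whether the result is
   recognizable:

     SUBST (SET_SRC (set), new_src);
     if (recog_for_combine (&newpat, i3, &new_notes) < 0)
       undo_all ();
     else
       undo_commit ();

   undo_all restores every recorded change; undo_commit makes the recorded
   changes permanent.  */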
843 \f
844 /* Subroutine of try_combine. Determine whether the replacement patterns
845 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
846 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
847 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
848 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
849 of all the instructions can be estimated and the replacements are more
850 expensive than the original sequence. */
851
852 static bool
853 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
854 rtx newpat, rtx newi2pat, rtx newotherpat)
855 {
856 int i0_cost, i1_cost, i2_cost, i3_cost;
857 int new_i2_cost, new_i3_cost;
858 int old_cost, new_cost;
859
860 /* Lookup the original insn_costs. */
861 i2_cost = INSN_COST (i2);
862 i3_cost = INSN_COST (i3);
863
864 if (i1)
865 {
866 i1_cost = INSN_COST (i1);
867 if (i0)
868 {
869 i0_cost = INSN_COST (i0);
870 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
871 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
872 }
873 else
874 {
875 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
876 ? i1_cost + i2_cost + i3_cost : 0);
877 i0_cost = 0;
878 }
879 }
880 else
881 {
882 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
883 i1_cost = i0_cost = 0;
884 }
885
886 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
887 correct that. */
888 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
889 old_cost -= i1_cost;
890
891
892 /* Calculate the replacement insn_costs. */
893 rtx tmp = PATTERN (i3);
894 PATTERN (i3) = newpat;
895 int tmpi = INSN_CODE (i3);
896 INSN_CODE (i3) = -1;
897 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
898 PATTERN (i3) = tmp;
899 INSN_CODE (i3) = tmpi;
900 if (newi2pat)
901 {
902 tmp = PATTERN (i2);
903 PATTERN (i2) = newi2pat;
904 tmpi = INSN_CODE (i2);
905 INSN_CODE (i2) = -1;
906 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
907 PATTERN (i2) = tmp;
908 INSN_CODE (i2) = tmpi;
909 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
910 ? new_i2_cost + new_i3_cost : 0;
911 }
912 else
913 {
914 new_cost = new_i3_cost;
915 new_i2_cost = 0;
916 }
917
918 if (undobuf.other_insn)
919 {
920 int old_other_cost, new_other_cost;
921
922 old_other_cost = INSN_COST (undobuf.other_insn);
923 tmp = PATTERN (undobuf.other_insn);
924 PATTERN (undobuf.other_insn) = newotherpat;
925 tmpi = INSN_CODE (undobuf.other_insn);
926 INSN_CODE (undobuf.other_insn) = -1;
927 new_other_cost = insn_cost (undobuf.other_insn,
928 optimize_this_for_speed_p);
929 PATTERN (undobuf.other_insn) = tmp;
930 INSN_CODE (undobuf.other_insn) = tmpi;
931 if (old_other_cost > 0 && new_other_cost > 0)
932 {
933 old_cost += old_other_cost;
934 new_cost += new_other_cost;
935 }
936 else
937 old_cost = 0;
938 }
939
940 /* Disallow this combination if both new_cost and old_cost are greater than
941 zero, and new_cost is greater than old cost. */
942 int reject = old_cost > 0 && new_cost > old_cost;
943
944 if (dump_file)
945 {
946 fprintf (dump_file, "%s combination of insns ",
947 reject ? "rejecting" : "allowing");
948 if (i0)
949 fprintf (dump_file, "%d, ", INSN_UID (i0));
950 if (i1 && INSN_UID (i1) != INSN_UID (i2))
951 fprintf (dump_file, "%d, ", INSN_UID (i1));
952 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
953
954 fprintf (dump_file, "original costs ");
955 if (i0)
956 fprintf (dump_file, "%d + ", i0_cost);
957 if (i1 && INSN_UID (i1) != INSN_UID (i2))
958 fprintf (dump_file, "%d + ", i1_cost);
959 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
960
961 if (newi2pat)
962 fprintf (dump_file, "replacement costs %d + %d = %d\n",
963 new_i2_cost, new_i3_cost, new_cost);
964 else
965 fprintf (dump_file, "replacement cost %d\n", new_cost);
966 }
967
968 if (reject)
969 return false;
970
971 /* Update the uid_insn_cost array with the replacement costs. */
972 INSN_COST (i2) = new_i2_cost;
973 INSN_COST (i3) = new_i3_cost;
974 if (i1)
975 {
976 INSN_COST (i1) = 0;
977 if (i0)
978 INSN_COST (i0) = 0;
979 }
980
981 return true;
982 }
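
/* Worked example for the cost test above, added for illustration only
   (not part of the original file): if I2 and I3 each cost 4 (old_cost = 8),
   newi2pat is NULL and the replacement I3 pattern costs 12, the combination
   is rejected; at a replacement cost of 8 or less it is allowed.  If any of
   the original costs is unknown (zero), old_cost is treated as 0 and the
   combination is never rejected on cost grounds.  */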
983
984
985 /* Delete any insns that copy a register to itself.
986 Return true if the CFG was changed. */
987
988 static bool
989 delete_noop_moves (void)
990 {
991 rtx_insn *insn, *next;
992 basic_block bb;
993
994 bool edges_deleted = false;
995
996 FOR_EACH_BB_FN (bb, cfun)
997 {
998 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
999 {
1000 next = NEXT_INSN (insn);
1001 if (INSN_P (insn) && noop_move_p (insn))
1002 {
1003 if (dump_file)
1004 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1005
1006 edges_deleted |= delete_insn_and_edges (insn);
1007 }
1008 }
1009 }
1010
1011 return edges_deleted;
1012 }
1013
1014 \f
1015 /* Return false if we do not want to (or cannot) combine DEF. */
1016 static bool
1017 can_combine_def_p (df_ref def)
1018 {
1019 /* Do not consider if it is pre/post modification in MEM. */
1020 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1021 return false;
1022
1023 unsigned int regno = DF_REF_REGNO (def);
1024
1025 /* Do not combine frame pointer adjustments. */
1026 if ((regno == FRAME_POINTER_REGNUM
1027 && (!reload_completed || frame_pointer_needed))
1028 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1029 && regno == HARD_FRAME_POINTER_REGNUM
1030 && (!reload_completed || frame_pointer_needed))
1031 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1032 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1033 return false;
1034
1035 return true;
1036 }
1037
1038 /* Return false if we do not want to (or cannot) combine USE. */
1039 static bool
1040 can_combine_use_p (df_ref use)
1041 {
1042 /* Do not consider the use of the stack pointer by a function call. */
1043 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1044 return false;
1045
1046 return true;
1047 }
1048
1049 /* Fill in log links field for all insns. */
1050
1051 static void
1052 create_log_links (void)
1053 {
1054 basic_block bb;
1055 rtx_insn **next_use;
1056 rtx_insn *insn;
1057 df_ref def, use;
1058
1059 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1060
1061 /* Pass through each block from the end, recording the uses of each
1062 register and establishing log links when def is encountered.
1063 Note that we do not clear the next_use array in order to save time,
1064 so we have to test whether the use is in the same basic block as def.
1065
1066 There are a few cases below when we do not consider the definition or
1067 usage -- these are taken from what the original flow.c did. Don't ask me why it is
1068 done this way; I don't know and if it works, I don't want to know. */
1069
1070 FOR_EACH_BB_FN (bb, cfun)
1071 {
1072 FOR_BB_INSNS_REVERSE (bb, insn)
1073 {
1074 if (!NONDEBUG_INSN_P (insn))
1075 continue;
1076
1077 /* Log links are created only once. */
1078 gcc_assert (!LOG_LINKS (insn));
1079
1080 FOR_EACH_INSN_DEF (def, insn)
1081 {
1082 unsigned int regno = DF_REF_REGNO (def);
1083 rtx_insn *use_insn;
1084
1085 if (!next_use[regno])
1086 continue;
1087
1088 if (!can_combine_def_p (def))
1089 continue;
1090
1091 use_insn = next_use[regno];
1092 next_use[regno] = NULL;
1093
1094 if (BLOCK_FOR_INSN (use_insn) != bb)
1095 continue;
1096
1097 /* flow.c claimed:
1098
1099 We don't build a LOG_LINK for hard registers contained
1100 in ASM_OPERANDs. If these registers get replaced,
1101 we might wind up changing the semantics of the insn,
1102 even if reload can make what appear to be valid
1103 assignments later. */
1104 if (regno < FIRST_PSEUDO_REGISTER
1105 && asm_noperands (PATTERN (use_insn)) >= 0)
1106 continue;
1107
1108 /* Don't add duplicate links between instructions. */
1109 struct insn_link *links;
1110 FOR_EACH_LOG_LINK (links, use_insn)
1111 if (insn == links->insn && regno == links->regno)
1112 break;
1113
1114 if (!links)
1115 LOG_LINKS (use_insn)
1116 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1117 }
1118
1119 FOR_EACH_INSN_USE (use, insn)
1120 if (can_combine_use_p (use))
1121 next_use[DF_REF_REGNO (use)] = insn;
1122 }
1123 }
1124
1125 free (next_use);
1126 }
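
/* Illustration of the walk above, added as annotation and not part of the
   original file: for a block containing

     insn 10: (set (reg:SI 100) (reg:SI 101))
     insn 11: (set (reg:SI 102) (plus:SI (reg:SI 100) (const_int 1)))

   scanning backwards records next_use[100] = insn 11 at insn 11 and then,
   at the def in insn 10, creates LOG_LINKS (insn 11) = {insn 10, regno 100},
   i.e. insn 10 is the most recent def of reg 100 reaching its use in
   insn 11 within the same basic block.  */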
1127
1128 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1129 true if we found a LOG_LINK that proves that A feeds B. This only works
1130 if there are no instructions between A and B which could have a link
1131 depending on A, since in that case we would not record a link for B.
1132 We also check the implicit dependency created by a cc0 setter/user
1133 pair. */
1134
1135 static bool
1136 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1137 {
1138 struct insn_link *links;
1139 FOR_EACH_LOG_LINK (links, b)
1140 if (links->insn == a)
1141 return true;
1142 if (HAVE_cc0 && sets_cc0_p (a))
1143 return true;
1144 return false;
1145 }
1146 \f
1147 /* Main entry point for combiner. F is the first insn of the function.
1148 NREGS is the first unused pseudo-reg number.
1149
1150 Return nonzero if the CFG was changed (e.g. if the combiner has
1151 turned an indirect jump instruction into a direct jump). */
1152 static int
1153 combine_instructions (rtx_insn *f, unsigned int nregs)
1154 {
1155 rtx_insn *insn, *next;
1156 rtx_insn *prev;
1157 struct insn_link *links, *nextlinks;
1158 rtx_insn *first;
1159 basic_block last_bb;
1160
1161 int new_direct_jump_p = 0;
1162
1163 for (first = f; first && !NONDEBUG_INSN_P (first); )
1164 first = NEXT_INSN (first);
1165 if (!first)
1166 return 0;
1167
1168 combine_attempts = 0;
1169 combine_merges = 0;
1170 combine_extras = 0;
1171 combine_successes = 0;
1172
1173 rtl_hooks = combine_rtl_hooks;
1174
1175 reg_stat.safe_grow_cleared (nregs);
1176
1177 init_recog_no_volatile ();
1178
1179 /* Allocate array for insn info. */
1180 max_uid_known = get_max_uid ();
1181 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1182 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1183 gcc_obstack_init (&insn_link_obstack);
1184
1185 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1186
1187 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1188 problems when, for example, we have j <<= 1 in a loop. */
1189
1190 nonzero_sign_valid = 0;
1191 label_tick = label_tick_ebb_start = 1;
1192
1193 /* Scan all SETs and see if we can deduce anything about what
1194 bits are known to be zero for some registers and how many copies
1195 of the sign bit are known to exist for those registers.
1196
1197 Also set any known values so that we can use it while searching
1198 for what bits are known to be set. */
1199
1200 setup_incoming_promotions (first);
1201 /* Allow the entry block and the first block to fall into the same EBB.
1202 Conceptually the incoming promotions are assigned to the entry block. */
1203 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1204
1205 create_log_links ();
1206 FOR_EACH_BB_FN (this_basic_block, cfun)
1207 {
1208 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1209 last_call_luid = 0;
1210 mem_last_set = -1;
1211
1212 label_tick++;
1213 if (!single_pred_p (this_basic_block)
1214 || single_pred (this_basic_block) != last_bb)
1215 label_tick_ebb_start = label_tick;
1216 last_bb = this_basic_block;
1217
1218 FOR_BB_INSNS (this_basic_block, insn)
1219 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1220 {
1221 rtx links;
1222
1223 subst_low_luid = DF_INSN_LUID (insn);
1224 subst_insn = insn;
1225
1226 note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
1227 record_dead_and_set_regs (insn);
1228
1229 if (AUTO_INC_DEC)
1230 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1231 if (REG_NOTE_KIND (links) == REG_INC)
1232 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1233 insn);
1234
1235 /* Record the current insn_cost of this instruction. */
1236 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1237 if (dump_file)
1238 {
1239 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1240 dump_insn_slim (dump_file, insn);
1241 }
1242 }
1243 }
1244
1245 nonzero_sign_valid = 1;
1246
1247 /* Now scan all the insns in forward order. */
1248 label_tick = label_tick_ebb_start = 1;
1249 init_reg_last ();
1250 setup_incoming_promotions (first);
1251 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1252 int max_combine = param_max_combine_insns;
1253
1254 FOR_EACH_BB_FN (this_basic_block, cfun)
1255 {
1256 rtx_insn *last_combined_insn = NULL;
1257
1258 /* Ignore instruction combination in basic blocks that are going to
1259 be removed as unreachable anyway. See PR82386. */
1260 if (EDGE_COUNT (this_basic_block->preds) == 0)
1261 continue;
1262
1263 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1264 last_call_luid = 0;
1265 mem_last_set = -1;
1266
1267 label_tick++;
1268 if (!single_pred_p (this_basic_block)
1269 || single_pred (this_basic_block) != last_bb)
1270 label_tick_ebb_start = label_tick;
1271 last_bb = this_basic_block;
1272
1273 rtl_profile_for_bb (this_basic_block);
1274 for (insn = BB_HEAD (this_basic_block);
1275 insn != NEXT_INSN (BB_END (this_basic_block));
1276 insn = next ? next : NEXT_INSN (insn))
1277 {
1278 next = 0;
1279 if (!NONDEBUG_INSN_P (insn))
1280 continue;
1281
1282 while (last_combined_insn
1283 && (!NONDEBUG_INSN_P (last_combined_insn)
1284 || last_combined_insn->deleted ()))
1285 last_combined_insn = PREV_INSN (last_combined_insn);
1286 if (last_combined_insn == NULL_RTX
1287 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1288 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1289 last_combined_insn = insn;
1290
1291 /* See if we know about function return values before this
1292 insn based upon SUBREG flags. */
1293 check_promoted_subreg (insn, PATTERN (insn));
1294
1295 /* See if we can find hard regs and subregs of pseudos in
1296 narrower modes. This could help turn TRUNCATEs
1297 into SUBREGs. */
1298 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1299
1300 /* Try this insn with each insn it links back to. */
1301
1302 FOR_EACH_LOG_LINK (links, insn)
1303 if ((next = try_combine (insn, links->insn, NULL,
1304 NULL, &new_direct_jump_p,
1305 last_combined_insn)) != 0)
1306 {
1307 statistics_counter_event (cfun, "two-insn combine", 1);
1308 goto retry;
1309 }
1310
1311 /* Try each sequence of three linked insns ending with this one. */
1312
1313 if (max_combine >= 3)
1314 FOR_EACH_LOG_LINK (links, insn)
1315 {
1316 rtx_insn *link = links->insn;
1317
1318 /* If the linked insn has been replaced by a note, then there
1319 is no point in pursuing this chain any further. */
1320 if (NOTE_P (link))
1321 continue;
1322
1323 FOR_EACH_LOG_LINK (nextlinks, link)
1324 if ((next = try_combine (insn, link, nextlinks->insn,
1325 NULL, &new_direct_jump_p,
1326 last_combined_insn)) != 0)
1327 {
1328 statistics_counter_event (cfun, "three-insn combine", 1);
1329 goto retry;
1330 }
1331 }
1332
1333 /* Try to combine a jump insn that uses CC0
1334 with a preceding insn that sets CC0, and maybe with its
1335 logical predecessor as well.
1336 This is how we make decrement-and-branch insns.
1337 We need this special code because data flow connections
1338 via CC0 do not get entered in LOG_LINKS. */
1339
1340 if (HAVE_cc0
1341 && JUMP_P (insn)
1342 && (prev = prev_nonnote_insn (insn)) != 0
1343 && NONJUMP_INSN_P (prev)
1344 && sets_cc0_p (PATTERN (prev)))
1345 {
1346 if ((next = try_combine (insn, prev, NULL, NULL,
1347 &new_direct_jump_p,
1348 last_combined_insn)) != 0)
1349 goto retry;
1350
1351 FOR_EACH_LOG_LINK (nextlinks, prev)
1352 if ((next = try_combine (insn, prev, nextlinks->insn,
1353 NULL, &new_direct_jump_p,
1354 last_combined_insn)) != 0)
1355 goto retry;
1356 }
1357
1358 /* Do the same for an insn that explicitly references CC0. */
1359 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1360 && (prev = prev_nonnote_insn (insn)) != 0
1361 && NONJUMP_INSN_P (prev)
1362 && sets_cc0_p (PATTERN (prev))
1363 && GET_CODE (PATTERN (insn)) == SET
1364 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1365 {
1366 if ((next = try_combine (insn, prev, NULL, NULL,
1367 &new_direct_jump_p,
1368 last_combined_insn)) != 0)
1369 goto retry;
1370
1371 FOR_EACH_LOG_LINK (nextlinks, prev)
1372 if ((next = try_combine (insn, prev, nextlinks->insn,
1373 NULL, &new_direct_jump_p,
1374 last_combined_insn)) != 0)
1375 goto retry;
1376 }
1377
1378 /* Finally, see if any of the insns that this insn links to
1379 explicitly references CC0. If so, try this insn, that insn,
1380 and its predecessor if it sets CC0. */
1381 if (HAVE_cc0)
1382 {
1383 FOR_EACH_LOG_LINK (links, insn)
1384 if (NONJUMP_INSN_P (links->insn)
1385 && GET_CODE (PATTERN (links->insn)) == SET
1386 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1387 && (prev = prev_nonnote_insn (links->insn)) != 0
1388 && NONJUMP_INSN_P (prev)
1389 && sets_cc0_p (PATTERN (prev))
1390 && (next = try_combine (insn, links->insn,
1391 prev, NULL, &new_direct_jump_p,
1392 last_combined_insn)) != 0)
1393 goto retry;
1394 }
1395
1396 /* Try combining an insn with two different insns whose results it
1397 uses. */
1398 if (max_combine >= 3)
1399 FOR_EACH_LOG_LINK (links, insn)
1400 for (nextlinks = links->next; nextlinks;
1401 nextlinks = nextlinks->next)
1402 if ((next = try_combine (insn, links->insn,
1403 nextlinks->insn, NULL,
1404 &new_direct_jump_p,
1405 last_combined_insn)) != 0)
1406
1407 {
1408 statistics_counter_event (cfun, "three-insn combine", 1);
1409 goto retry;
1410 }
1411
1412 /* Try four-instruction combinations. */
1413 if (max_combine >= 4)
1414 FOR_EACH_LOG_LINK (links, insn)
1415 {
1416 struct insn_link *next1;
1417 rtx_insn *link = links->insn;
1418
1419 /* If the linked insn has been replaced by a note, then there
1420 is no point in pursuing this chain any further. */
1421 if (NOTE_P (link))
1422 continue;
1423
1424 FOR_EACH_LOG_LINK (next1, link)
1425 {
1426 rtx_insn *link1 = next1->insn;
1427 if (NOTE_P (link1))
1428 continue;
1429 /* I0 -> I1 -> I2 -> I3. */
1430 FOR_EACH_LOG_LINK (nextlinks, link1)
1431 if ((next = try_combine (insn, link, link1,
1432 nextlinks->insn,
1433 &new_direct_jump_p,
1434 last_combined_insn)) != 0)
1435 {
1436 statistics_counter_event (cfun, "four-insn combine", 1);
1437 goto retry;
1438 }
1439 /* I0, I1 -> I2, I2 -> I3. */
1440 for (nextlinks = next1->next; nextlinks;
1441 nextlinks = nextlinks->next)
1442 if ((next = try_combine (insn, link, link1,
1443 nextlinks->insn,
1444 &new_direct_jump_p,
1445 last_combined_insn)) != 0)
1446 {
1447 statistics_counter_event (cfun, "four-insn combine", 1);
1448 goto retry;
1449 }
1450 }
1451
1452 for (next1 = links->next; next1; next1 = next1->next)
1453 {
1454 rtx_insn *link1 = next1->insn;
1455 if (NOTE_P (link1))
1456 continue;
1457 /* I0 -> I2; I1, I2 -> I3. */
1458 FOR_EACH_LOG_LINK (nextlinks, link)
1459 if ((next = try_combine (insn, link, link1,
1460 nextlinks->insn,
1461 &new_direct_jump_p,
1462 last_combined_insn)) != 0)
1463 {
1464 statistics_counter_event (cfun, "four-insn combine", 1);
1465 goto retry;
1466 }
1467 /* I0 -> I1; I1, I2 -> I3. */
1468 FOR_EACH_LOG_LINK (nextlinks, link1)
1469 if ((next = try_combine (insn, link, link1,
1470 nextlinks->insn,
1471 &new_direct_jump_p,
1472 last_combined_insn)) != 0)
1473 {
1474 statistics_counter_event (cfun, "four-insn combine", 1);
1475 goto retry;
1476 }
1477 }
1478 }
1479
1480 /* Try this insn with each REG_EQUAL note it links back to. */
1481 FOR_EACH_LOG_LINK (links, insn)
1482 {
1483 rtx set, note;
1484 rtx_insn *temp = links->insn;
1485 if ((set = single_set (temp)) != 0
1486 && (note = find_reg_equal_equiv_note (temp)) != 0
1487 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1488 /* Avoid using a register that may have already been marked
1489 dead by an earlier instruction. */
1490 && ! unmentioned_reg_p (note, SET_SRC (set))
1491 && (GET_MODE (note) == VOIDmode
1492 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1493 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1494 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1495 || (GET_MODE (XEXP (SET_DEST (set), 0))
1496 == GET_MODE (note))))))
1497 {
1498 /* Temporarily replace the set's source with the
1499 contents of the REG_EQUAL note. The insn will
1500 be deleted or recognized by try_combine. */
1501 rtx orig_src = SET_SRC (set);
1502 rtx orig_dest = SET_DEST (set);
1503 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1504 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1505 SET_SRC (set) = note;
1506 i2mod = temp;
1507 i2mod_old_rhs = copy_rtx (orig_src);
1508 i2mod_new_rhs = copy_rtx (note);
1509 next = try_combine (insn, i2mod, NULL, NULL,
1510 &new_direct_jump_p,
1511 last_combined_insn);
1512 i2mod = NULL;
1513 if (next)
1514 {
1515 statistics_counter_event (cfun, "insn-with-note combine", 1);
1516 goto retry;
1517 }
1518 SET_SRC (set) = orig_src;
1519 SET_DEST (set) = orig_dest;
1520 }
1521 }
1522
1523 if (!NOTE_P (insn))
1524 record_dead_and_set_regs (insn);
1525
1526 retry:
1527 ;
1528 }
1529 }
1530
1531 default_rtl_profile ();
1532 clear_bb_flags ();
1533 new_direct_jump_p |= purge_all_dead_edges ();
1534 new_direct_jump_p |= delete_noop_moves ();
1535
1536 /* Clean up. */
1537 obstack_free (&insn_link_obstack, NULL);
1538 free (uid_log_links);
1539 free (uid_insn_cost);
1540 reg_stat.release ();
1541
1542 {
1543 struct undo *undo, *next;
1544 for (undo = undobuf.frees; undo; undo = next)
1545 {
1546 next = undo->next;
1547 free (undo);
1548 }
1549 undobuf.frees = 0;
1550 }
1551
1552 total_attempts += combine_attempts;
1553 total_merges += combine_merges;
1554 total_extras += combine_extras;
1555 total_successes += combine_successes;
1556
1557 nonzero_sign_valid = 0;
1558 rtl_hooks = general_rtl_hooks;
1559
1560 /* Make recognizer allow volatile MEMs again. */
1561 init_recog ();
1562
1563 return new_direct_jump_p;
1564 }
1565
1566 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1567
1568 static void
1569 init_reg_last (void)
1570 {
1571 unsigned int i;
1572 reg_stat_type *p;
1573
1574 FOR_EACH_VEC_ELT (reg_stat, i, p)
1575 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1576 }
1577 \f
1578 /* Set up any promoted values for incoming argument registers. */
1579
1580 static void
1581 setup_incoming_promotions (rtx_insn *first)
1582 {
1583 tree arg;
1584 bool strictly_local = false;
1585
1586 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1587 arg = DECL_CHAIN (arg))
1588 {
1589 rtx x, reg = DECL_INCOMING_RTL (arg);
1590 int uns1, uns3;
1591 machine_mode mode1, mode2, mode3, mode4;
1592
1593 /* Only continue if the incoming argument is in a register. */
1594 if (!REG_P (reg))
1595 continue;
1596
1597 /* Determine, if possible, whether all call sites of the current
1598 function lie within the current compilation unit. (This does
1599 take into account the exporting of a function via taking its
1600 address, and so forth.) */
1601 strictly_local
1602 = cgraph_node::local_info_node (current_function_decl)->local;
1603
1604 /* The mode and signedness of the argument before any promotions happen
1605 (equal to the mode of the pseudo holding it at that stage). */
1606 mode1 = TYPE_MODE (TREE_TYPE (arg));
1607 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1608
1609 /* The mode and signedness of the argument after any source language and
1610 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1611 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1612 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1613
1614 /* The mode and signedness of the argument as it is actually passed,
1615 see assign_parm_setup_reg in function.c. */
1616 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1617 TREE_TYPE (cfun->decl), 0);
1618
1619 /* The mode of the register in which the argument is being passed. */
1620 mode4 = GET_MODE (reg);
1621
1622 /* Eliminate sign extensions in the callee when:
1623 (a) A mode promotion has occurred; */
1624 if (mode1 == mode3)
1625 continue;
1626 /* (b) The mode of the register is the same as the mode of
1627 the argument as it is passed; */
1628 if (mode3 != mode4)
1629 continue;
1630 /* (c) There's no language level extension; */
1631 if (mode1 == mode2)
1632 ;
1633 /* (c.1) All callers are from the current compilation unit. If that's
1634 the case we don't have to rely on an ABI, we only have to know
1635 what we're generating right now, and we know that we will do the
1636 mode1 to mode2 promotion with the given sign. */
1637 else if (!strictly_local)
1638 continue;
1639 /* (c.2) The combination of the two promotions is useful. This is
1640 true when the signs match, or if the first promotion is unsigned.
1641 In the latter case, (sign_extend (zero_extend x)) is the same as
1642 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1643 else if (uns1)
1644 uns3 = true;
1645 else if (uns3)
1646 continue;
1647
1648 /* Record that the value was promoted from mode1 to mode3,
1649 so that any sign extension at the head of the current
1650 function may be eliminated. */
1651 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1652 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1653 record_value_for_reg (reg, first, x);
1654 }
1655 }
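
/* Example of what gets recorded, added for illustration only (not part of
   the original file): for a signed char argument passed in an SImode
   register on a target that promotes such arguments, mode1 is QImode,
   mode3 is SImode and uns3 is false, so the recorded value is

     (sign_extend:SI (clobber:QI (const_int 0)))

   which lets a redundant QI->SI sign extension at the start of the
   function be removed.  */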
1656
1657 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1658 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1659 because some machines (maybe most) will actually do the sign-extension and
1660 this is the conservative approach.
1661
1662 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1663 kludge. */
1664
1665 static rtx
1666 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1667 {
1668 scalar_int_mode int_mode;
1669 if (CONST_INT_P (src)
1670 && is_a <scalar_int_mode> (mode, &int_mode)
1671 && GET_MODE_PRECISION (int_mode) < prec
1672 && INTVAL (src) > 0
1673 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1674 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1675
1676 return src;
1677 }
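
/* Worked example, added for illustration only (not part of the original
   file): with SHORT_IMMEDIATES_SIGN_EXTEND and PREC = BITS_PER_WORD, a
   HImode SRC of (const_int 0x8000) has its sign bit set, so it is rewritten
   as (const_int -32768), i.e. all bits above bit 15 set, matching what a
   machine that sign-extends short immediates would actually compute.  */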
1678
1679 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1680 and SET. */
1681
1682 static void
1683 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1684 rtx x)
1685 {
1686 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1687 unsigned HOST_WIDE_INT bits = 0;
1688 rtx reg_equal = NULL, src = SET_SRC (set);
1689 unsigned int num = 0;
1690
1691 if (reg_equal_note)
1692 reg_equal = XEXP (reg_equal_note, 0);
1693
1694 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1695 {
1696 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1697 if (reg_equal)
1698 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1699 }
1700
1701 /* Don't call nonzero_bits if it cannot change anything. */
1702 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1703 {
1704 machine_mode mode = GET_MODE (x);
1705 if (GET_MODE_CLASS (mode) == MODE_INT
1706 && HWI_COMPUTABLE_MODE_P (mode))
1707 mode = nonzero_bits_mode;
1708 bits = nonzero_bits (src, mode);
1709 if (reg_equal && bits)
1710 bits &= nonzero_bits (reg_equal, mode);
1711 rsp->nonzero_bits |= bits;
1712 }
1713
1714 /* Don't call num_sign_bit_copies if it cannot change anything. */
1715 if (rsp->sign_bit_copies != 1)
1716 {
1717 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1718 if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1719 {
1720 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1721 if (num == 0 || numeq > num)
1722 num = numeq;
1723 }
1724 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1725 rsp->sign_bit_copies = num;
1726 }
1727 }
1728
1729 /* Called via note_stores. If X is a pseudo that is narrower than
1730 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1731
1732 If we are setting only a portion of X and we can't figure out what
1733 portion, assume all bits will be used since we don't know what will
1734 be happening.
1735
1736 Similarly, set how many bits of X are known to be copies of the sign bit
1737 at all locations in the function. This is the smallest number implied
1738 by any set of X. */
1739
1740 static void
1741 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1742 {
1743 rtx_insn *insn = (rtx_insn *) data;
1744 scalar_int_mode mode;
1745
1746 if (REG_P (x)
1747 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1748 	 /* If this register is undefined at the start of the function, we can't
1749 say what its contents were. */
1750 && ! REGNO_REG_SET_P
1751 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1752 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1753 && HWI_COMPUTABLE_MODE_P (mode))
1754 {
1755 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1756
1757 if (set == 0 || GET_CODE (set) == CLOBBER)
1758 {
1759 rsp->nonzero_bits = GET_MODE_MASK (mode);
1760 rsp->sign_bit_copies = 1;
1761 return;
1762 }
1763
1764 /* If this register is being initialized using itself, and the
1765 register is uninitialized in this basic block, and there are
1766 no LOG_LINKS which set the register, then part of the
1767 register is uninitialized. In that case we can't assume
1768 anything about the number of nonzero bits.
1769
1770 ??? We could do better if we checked this in
1771 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1772 could avoid making assumptions about the insn which initially
1773 sets the register, while still using the information in other
1774 insns. We would have to be careful to check every insn
1775 involved in the combination. */
1776
1777 if (insn
1778 && reg_referenced_p (x, PATTERN (insn))
1779 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1780 REGNO (x)))
1781 {
1782 struct insn_link *link;
1783
1784 FOR_EACH_LOG_LINK (link, insn)
1785 if (dead_or_set_p (link->insn, x))
1786 break;
1787 if (!link)
1788 {
1789 rsp->nonzero_bits = GET_MODE_MASK (mode);
1790 rsp->sign_bit_copies = 1;
1791 return;
1792 }
1793 }
1794
1795 /* If this is a complex assignment, see if we can convert it into a
1796 simple assignment. */
1797 set = expand_field_assignment (set);
1798
1799 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1800 set what we know about X. */
1801
1802 if (SET_DEST (set) == x
1803 || (paradoxical_subreg_p (SET_DEST (set))
1804 && SUBREG_REG (SET_DEST (set)) == x))
1805 update_rsp_from_reg_equal (rsp, insn, set, x);
1806 else
1807 {
1808 rsp->nonzero_bits = GET_MODE_MASK (mode);
1809 rsp->sign_bit_copies = 1;
1810 }
1811 }
1812 }
1813 \f
1814 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1815    optional insns that were previously combined into I3 or that will be
1816 combined into the merger of INSN and I3. The order is PRED, PRED2,
1817 INSN, SUCC, SUCC2, I3.
1818
1819 Return 0 if the combination is not allowed for any reason.
1820
1821 If the combination is allowed, *PDEST will be set to the single
1822 destination of INSN and *PSRC to the single source, and this function
1823 will return 1. */
1824
1825 static int
1826 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1827 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1828 rtx *pdest, rtx *psrc)
1829 {
1830 int i;
1831 const_rtx set = 0;
1832 rtx src, dest;
1833 rtx_insn *p;
1834 rtx link;
1835 bool all_adjacent = true;
1836 int (*is_volatile_p) (const_rtx);
1837
1838 if (succ)
1839 {
1840 if (succ2)
1841 {
1842 if (next_active_insn (succ2) != i3)
1843 all_adjacent = false;
1844 if (next_active_insn (succ) != succ2)
1845 all_adjacent = false;
1846 }
1847 else if (next_active_insn (succ) != i3)
1848 all_adjacent = false;
1849 if (next_active_insn (insn) != succ)
1850 all_adjacent = false;
1851 }
1852 else if (next_active_insn (insn) != i3)
1853 all_adjacent = false;
1854
1855   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1856 or a PARALLEL consisting of such a SET and CLOBBERs.
1857
1858 If INSN has CLOBBER parallel parts, ignore them for our processing.
1859 By definition, these happen during the execution of the insn. When it
1860 is merged with another insn, all bets are off. If they are, in fact,
1861 needed and aren't also supplied in I3, they may be added by
1862 recog_for_combine. Otherwise, it won't match.
1863
1864 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1865 note.
1866
1867 Get the source and destination of INSN. If more than one, can't
1868 combine. */
1869
1870 if (GET_CODE (PATTERN (insn)) == SET)
1871 set = PATTERN (insn);
1872 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1873 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1874 {
1875 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1876 {
1877 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1878
1879 switch (GET_CODE (elt))
1880 {
1881 /* This is important to combine floating point insns
1882 for the SH4 port. */
1883 case USE:
1884 /* Combining an isolated USE doesn't make sense.
1885 We depend here on combinable_i3pat to reject them. */
1886 /* The code below this loop only verifies that the inputs of
1887 the SET in INSN do not change. We call reg_set_between_p
1888 to verify that the REG in the USE does not change between
1889 I3 and INSN.
1890 If the USE in INSN was for a pseudo register, the matching
1891 insn pattern will likely match any register; combining this
1892 with any other USE would only be safe if we knew that the
1893 used registers have identical values, or if there was
1894 something to tell them apart, e.g. different modes. For
1895 now, we forgo such complicated tests and simply disallow
1896 combining of USES of pseudo registers with any other USE. */
1897 if (REG_P (XEXP (elt, 0))
1898 && GET_CODE (PATTERN (i3)) == PARALLEL)
1899 {
1900 rtx i3pat = PATTERN (i3);
1901 int i = XVECLEN (i3pat, 0) - 1;
1902 unsigned int regno = REGNO (XEXP (elt, 0));
1903
1904 do
1905 {
1906 rtx i3elt = XVECEXP (i3pat, 0, i);
1907
1908 if (GET_CODE (i3elt) == USE
1909 && REG_P (XEXP (i3elt, 0))
1910 && (REGNO (XEXP (i3elt, 0)) == regno
1911 ? reg_set_between_p (XEXP (elt, 0),
1912 PREV_INSN (insn), i3)
1913 : regno >= FIRST_PSEUDO_REGISTER))
1914 return 0;
1915 }
1916 while (--i >= 0);
1917 }
1918 break;
1919
1920 /* We can ignore CLOBBERs. */
1921 case CLOBBER:
1922 break;
1923
1924 case SET:
1925 /* Ignore SETs whose result isn't used but not those that
1926 have side-effects. */
1927 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1928 && insn_nothrow_p (insn)
1929 && !side_effects_p (elt))
1930 break;
1931
1932 /* If we have already found a SET, this is a second one and
1933 so we cannot combine with this insn. */
1934 if (set)
1935 return 0;
1936
1937 set = elt;
1938 break;
1939
1940 default:
1941 /* Anything else means we can't combine. */
1942 return 0;
1943 }
1944 }
1945
1946 if (set == 0
1947 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1948 so don't do anything with it. */
1949 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1950 return 0;
1951 }
1952 else
1953 return 0;
1954
1955 if (set == 0)
1956 return 0;
1957
1958 /* The simplification in expand_field_assignment may call back to
1959 get_last_value, so set safe guard here. */
1960 subst_low_luid = DF_INSN_LUID (insn);
1961
1962 set = expand_field_assignment (set);
1963 src = SET_SRC (set), dest = SET_DEST (set);
1964
1965 	  /* Do not eliminate a user-specified register if it is in an
1966 	     asm input, because we may break the register asm usage defined
1967 	     in the GCC manual if we allow doing so.
1968 Be aware that this may cover more cases than we expect but this
1969 should be harmless. */
1970 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1971 && extract_asm_operands (PATTERN (i3)))
1972 return 0;
1973
1974 /* Don't eliminate a store in the stack pointer. */
1975 if (dest == stack_pointer_rtx
1976 /* Don't combine with an insn that sets a register to itself if it has
1977 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1978 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1979 /* Can't merge an ASM_OPERANDS. */
1980 || GET_CODE (src) == ASM_OPERANDS
1981 /* Can't merge a function call. */
1982 || GET_CODE (src) == CALL
1983 /* Don't eliminate a function call argument. */
1984 || (CALL_P (i3)
1985 && (find_reg_fusage (i3, USE, dest)
1986 || (REG_P (dest)
1987 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1988 && global_regs[REGNO (dest)])))
1989 /* Don't substitute into an incremented register. */
1990 || FIND_REG_INC_NOTE (i3, dest)
1991 || (succ && FIND_REG_INC_NOTE (succ, dest))
1992 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1993 /* Don't substitute into a non-local goto, this confuses CFG. */
1994 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1995 /* Make sure that DEST is not used after INSN but before SUCC, or
1996 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1997 || (!all_adjacent
1998 && ((succ2
1999 && (reg_used_between_p (dest, succ2, i3)
2000 || reg_used_between_p (dest, succ, succ2)))
2001 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2002 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2003 || (succ
2004 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2005 that case SUCC is not in the insn stream, so use SUCC2
2006 instead for this test. */
2007 && reg_used_between_p (dest, insn,
2008 succ2
2009 && INSN_UID (succ) == INSN_UID (succ2)
2010 ? succ2 : succ))))
2011 /* Make sure that the value that is to be substituted for the register
2012 does not use any registers whose values alter in between. However,
2013 	 if the insns are adjacent, a use can't cross a set even though we
2014 think it might (this can happen for a sequence of insns each setting
2015 the same destination; last_set of that register might point to
2016 a NOTE). If INSN has a REG_EQUIV note, the register is always
2017 equivalent to the memory so the substitution is valid even if there
2018 are intervening stores. Also, don't move a volatile asm or
2019 UNSPEC_VOLATILE across any other insns. */
2020 || (! all_adjacent
2021 && (((!MEM_P (src)
2022 || ! find_reg_note (insn, REG_EQUIV, src))
2023 && modified_between_p (src, insn, i3))
2024 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2025 || GET_CODE (src) == UNSPEC_VOLATILE))
2026 /* Don't combine across a CALL_INSN, because that would possibly
2027 change whether the life span of some REGs crosses calls or not,
2028 and it is a pain to update that information.
2029 Exception: if source is a constant, moving it later can't hurt.
2030 Accept that as a special case. */
2031 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2032 return 0;
2033
2034 /* DEST must either be a REG or CC0. */
2035 if (REG_P (dest))
2036 {
2037 /* If register alignment is being enforced for multi-word items in all
2038 cases except for parameters, it is possible to have a register copy
2039 insn referencing a hard register that is not allowed to contain the
2040 mode being copied and which would not be valid as an operand of most
2041 insns. Eliminate this problem by not combining with such an insn.
2042
2043 Also, on some machines we don't want to extend the life of a hard
2044 register. */
2045
2046 if (REG_P (src)
2047 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2048 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2049 /* Don't extend the life of a hard register unless it is
2050 	     a user variable (if we have few registers) or it can't
2051 fit into the desired register (meaning something special
2052 is going on).
2053 Also avoid substituting a return register into I3, because
2054 reload can't handle a conflict with constraints of other
2055 inputs. */
2056 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2057 && !targetm.hard_regno_mode_ok (REGNO (src),
2058 GET_MODE (src)))))
2059 return 0;
2060 }
2061 else if (GET_CODE (dest) != CC0)
2062 return 0;
2063
2064
2065 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2066 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2067 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2068 {
2069 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2070
2071 /* If the clobber represents an earlyclobber operand, we must not
2072 substitute an expression containing the clobbered register.
2073 As we do not analyze the constraint strings here, we have to
2074 make the conservative assumption. However, if the register is
2075 a fixed hard reg, the clobber cannot represent any operand;
2076 we leave it up to the machine description to either accept or
2077 reject use-and-clobber patterns. */
2078 if (!REG_P (reg)
2079 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2080 || !fixed_regs[REGNO (reg)])
2081 if (reg_overlap_mentioned_p (reg, src))
2082 return 0;
2083 }
2084
2085 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2086    or not), reject, unless nothing volatile comes between it and I3.  */
2087
2088 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2089 {
2090 /* Make sure neither succ nor succ2 contains a volatile reference. */
2091 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2092 return 0;
2093 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2094 return 0;
2095 /* We'll check insns between INSN and I3 below. */
2096 }
2097
2098 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2099 to be an explicit register variable, and was chosen for a reason. */
2100
2101 if (GET_CODE (src) == ASM_OPERANDS
2102 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2103 return 0;
2104
2105 /* If INSN contains volatile references (specifically volatile MEMs),
2106 we cannot combine across any other volatile references.
2107 Even if INSN doesn't contain volatile references, any intervening
2108 volatile insn might affect machine state. */
2109
2110 is_volatile_p = volatile_refs_p (PATTERN (insn))
2111 ? volatile_refs_p
2112 : volatile_insn_p;
2113
2114 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2115 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2116 return 0;
2117
2118 /* If INSN contains an autoincrement or autodecrement, make sure that
2119 register is not used between there and I3, and not already used in
2120 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2121 Also insist that I3 not be a jump if using LRA; if it were one
2122 and the incremented register were spilled, we would lose.
2123 Reload handles this correctly. */
2124
2125 if (AUTO_INC_DEC)
2126 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2127 if (REG_NOTE_KIND (link) == REG_INC
2128 && ((JUMP_P (i3) && targetm.lra_p ())
2129 || reg_used_between_p (XEXP (link, 0), insn, i3)
2130 || (pred != NULL_RTX
2131 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2132 || (pred2 != NULL_RTX
2133 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2134 || (succ != NULL_RTX
2135 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2136 || (succ2 != NULL_RTX
2137 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2138 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2139 return 0;
2140
2141 /* Don't combine an insn that follows a CC0-setting insn.
2142 An insn that uses CC0 must not be separated from the one that sets it.
2143 We do, however, allow I2 to follow a CC0-setting insn if that insn
2144 is passed as I1; in that case it will be deleted also.
2145 We also allow combining in this case if all the insns are adjacent
2146 because that would leave the two CC0 insns adjacent as well.
2147 It would be more logical to test whether CC0 occurs inside I1 or I2,
2148 but that would be much slower, and this ought to be equivalent. */
2149
2150 if (HAVE_cc0)
2151 {
2152 p = prev_nonnote_insn (insn);
2153 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2154 && ! all_adjacent)
2155 return 0;
2156 }
2157
2158 /* If we get here, we have passed all the tests and the combination is
2159 to be allowed. */
2160
2161 *pdest = dest;
2162 *psrc = src;
2163
2164 return 1;
2165 }
2166 \f
2167 /* LOC is the location within I3 that contains its pattern or the component
2168 of a PARALLEL of the pattern. We validate that it is valid for combining.
2169
2170    One problem is that if I3 modifies its output, as opposed to replacing it
2171    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST,
2172    as doing so would produce an insn not equivalent to the original insns.
2173
2174 Consider:
2175
2176 (set (reg:DI 101) (reg:DI 100))
2177 (set (subreg:SI (reg:DI 101) 0) <foo>)
2178
2179 This is NOT equivalent to:
2180
2181 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2182 (set (reg:DI 101) (reg:DI 100))])
2183
2184 Not only does this modify 100 (in which case it might still be valid
2185 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2186
2187 We can also run into a problem if I2 sets a register that I1
2188 uses and I1 gets directly substituted into I3 (not via I2). In that
2189 case, we would be getting the wrong value of I2DEST into I3, so we
2190 must reject the combination. This case occurs when I2 and I1 both
2191 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2192 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2193 of a SET must prevent combination from occurring. The same situation
2194 can occur for I0, in which case I0_NOT_IN_SRC is set.
2195
2196 Before doing the above check, we first try to expand a field assignment
2197 into a set of logical operations.
2198
2199 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2200 we place a register that is both set and used within I3. If more than one
2201 such register is detected, we fail.
2202
2203 Return 1 if the combination is valid, zero otherwise. */
2204
2205 static int
2206 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2207 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2208 {
2209 rtx x = *loc;
2210
2211 if (GET_CODE (x) == SET)
2212 {
2213       rtx set = x;
2214 rtx dest = SET_DEST (set);
2215 rtx src = SET_SRC (set);
2216 rtx inner_dest = dest;
2217 rtx subdest;
2218
2219 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2220 || GET_CODE (inner_dest) == SUBREG
2221 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2222 inner_dest = XEXP (inner_dest, 0);
2223
2224 /* Check for the case where I3 modifies its output, as discussed
2225 above. We don't want to prevent pseudos from being combined
2226 into the address of a MEM, so only prevent the combination if
2227 i1 or i2 set the same MEM. */
2228 if ((inner_dest != dest &&
2229 (!MEM_P (inner_dest)
2230 || rtx_equal_p (i2dest, inner_dest)
2231 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2232 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2233 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2234 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2235 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2236
2237 /* This is the same test done in can_combine_p except we can't test
2238 all_adjacent; we don't have to, since this instruction will stay
2239 in place, thus we are not considering increasing the lifetime of
2240 INNER_DEST.
2241
2242 Also, if this insn sets a function argument, combining it with
2243 something that might need a spill could clobber a previous
2244 function argument; the all_adjacent test in can_combine_p also
2245 checks this; here, we do a more specific test for this case. */
2246
2247 || (REG_P (inner_dest)
2248 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2249 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2250 GET_MODE (inner_dest)))
2251 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2252 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2253 return 0;
2254
2255 /* If DEST is used in I3, it is being killed in this insn, so
2256 record that for later. We have to consider paradoxical
2257 subregs here, since they kill the whole register, but we
2258 ignore partial subregs, STRICT_LOW_PART, etc.
2259 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2260 STACK_POINTER_REGNUM, since these are always considered to be
2261 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2262 subdest = dest;
2263 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2264 subdest = SUBREG_REG (subdest);
2265 if (pi3dest_killed
2266 && REG_P (subdest)
2267 && reg_referenced_p (subdest, PATTERN (i3))
2268 && REGNO (subdest) != FRAME_POINTER_REGNUM
2269 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2270 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2271 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2272 || (REGNO (subdest) != ARG_POINTER_REGNUM
2273 || ! fixed_regs [REGNO (subdest)]))
2274 && REGNO (subdest) != STACK_POINTER_REGNUM)
2275 {
2276 if (*pi3dest_killed)
2277 return 0;
2278
2279 *pi3dest_killed = subdest;
2280 }
2281 }
2282
2283 else if (GET_CODE (x) == PARALLEL)
2284 {
2285 int i;
2286
2287 for (i = 0; i < XVECLEN (x, 0); i++)
2288 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2289 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2290 return 0;
2291 }
2292
2293 return 1;
2294 }
2295 \f
2296 /* Return 1 if X is an arithmetic expression that contains a multiplication
2297    or division.  We don't count multiplications by powers of two here.  */
2298
2299 static int
2300 contains_muldiv (rtx x)
2301 {
2302 switch (GET_CODE (x))
2303 {
2304 case MOD: case DIV: case UMOD: case UDIV:
2305 return 1;
2306
2307 case MULT:
2308 return ! (CONST_INT_P (XEXP (x, 1))
2309 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2310 default:
2311 if (BINARY_P (x))
2312 return contains_muldiv (XEXP (x, 0))
2313 || contains_muldiv (XEXP (x, 1));
2314
2315 if (UNARY_P (x))
2316 return contains_muldiv (XEXP (x, 0));
2317
2318 return 0;
2319 }
2320 }
2321 \f
2322 /* Determine whether INSN can be used in a combination. Return nonzero if
2323 not. This is used in try_combine to detect early some cases where we
2324 can't perform combinations. */
2325
2326 static int
2327 cant_combine_insn_p (rtx_insn *insn)
2328 {
2329 rtx set;
2330 rtx src, dest;
2331
2332 /* If this isn't really an insn, we can't do anything.
2333 This can occur when flow deletes an insn that it has merged into an
2334 auto-increment address. */
2335 if (!NONDEBUG_INSN_P (insn))
2336 return 1;
2337
2338 /* Never combine loads and stores involving hard regs that are likely
2339 to be spilled. The register allocator can usually handle such
2340 reg-reg moves by tying. If we allow the combiner to make
2341 substitutions of likely-spilled regs, reload might die.
2342 As an exception, we allow combinations involving fixed regs; these are
2343 not available to the register allocator so there's no risk involved. */
2344
2345 set = single_set (insn);
2346 if (! set)
2347 return 0;
2348 src = SET_SRC (set);
2349 dest = SET_DEST (set);
2350 if (GET_CODE (src) == SUBREG)
2351 src = SUBREG_REG (src);
2352 if (GET_CODE (dest) == SUBREG)
2353 dest = SUBREG_REG (dest);
2354 if (REG_P (src) && REG_P (dest)
2355 && ((HARD_REGISTER_P (src)
2356 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2357 #ifdef LEAF_REGISTERS
2358 && ! LEAF_REGISTERS [REGNO (src)])
2359 #else
2360 )
2361 #endif
2362 || (HARD_REGISTER_P (dest)
2363 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2364 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2365 return 1;
2366
2367 return 0;
2368 }
2369
2370 struct likely_spilled_retval_info
2371 {
2372 unsigned regno, nregs;
2373 unsigned mask;
2374 };
2375
2376 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2377 hard registers that are known to be written to / clobbered in full. */
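/* An illustrative sketch (hypothetical register numbers): if INFO tracks
   regs 16..19 (info->regno == 16, info->nregs == 4) and X is a two-register
   store starting at reg 17, then new_mask below is (2U << 1) - 1 == 3,
   shifted left by 17 - 16 to give 6 (binary 110); clearing those bits in
   info->mask marks regs 17 and 18 as written in full.  */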
2378 static void
2379 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2380 {
2381 struct likely_spilled_retval_info *const info =
2382 (struct likely_spilled_retval_info *) data;
2383 unsigned regno, nregs;
2384 unsigned new_mask;
2385
2386 if (!REG_P (XEXP (set, 0)))
2387 return;
2388 regno = REGNO (x);
2389 if (regno >= info->regno + info->nregs)
2390 return;
2391 nregs = REG_NREGS (x);
2392 if (regno + nregs <= info->regno)
2393 return;
2394 new_mask = (2U << (nregs - 1)) - 1;
2395 if (regno < info->regno)
2396 new_mask >>= info->regno - regno;
2397 else
2398 new_mask <<= regno - info->regno;
2399 info->mask &= ~new_mask;
2400 }
2401
2402 /* Return nonzero iff part of the return value is live during INSN, and
2403 it is likely spilled. This can happen when more than one insn is needed
2404    to copy the return value, e.g. when we consider combining into the
2405 second copy insn for a complex value. */
2406
2407 static int
2408 likely_spilled_retval_p (rtx_insn *insn)
2409 {
2410 rtx_insn *use = BB_END (this_basic_block);
2411 rtx reg;
2412 rtx_insn *p;
2413 unsigned regno, nregs;
2414 /* We assume here that no machine mode needs more than
2415 32 hard registers when the value overlaps with a register
2416 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2417 unsigned mask;
2418 struct likely_spilled_retval_info info;
2419
2420 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2421 return 0;
2422 reg = XEXP (PATTERN (use), 0);
2423 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2424 return 0;
2425 regno = REGNO (reg);
2426 nregs = REG_NREGS (reg);
2427 if (nregs == 1)
2428 return 0;
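   /* MASK gets one bit per hard register of the return value, e.g. (an
      illustrative case) nregs == 3 yields (2U << 2) - 1 == 7 (binary 111).  */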
2429 mask = (2U << (nregs - 1)) - 1;
2430
2431 /* Disregard parts of the return value that are set later. */
2432 info.regno = regno;
2433 info.nregs = nregs;
2434 info.mask = mask;
2435 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2436 if (INSN_P (p))
2437 note_stores (p, likely_spilled_retval_1, &info);
2438 mask = info.mask;
2439
2440 /* Check if any of the (probably) live return value registers is
2441 likely spilled. */
2442   nregs--;
2443 do
2444 {
2445 if ((mask & 1 << nregs)
2446 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2447 return 1;
2448 } while (nregs--);
2449 return 0;
2450 }
2451
2452 /* Adjust INSN after we made a change to its destination.
2453
2454 Changing the destination can invalidate notes that say something about
2455 the results of the insn and a LOG_LINK pointing to the insn. */
2456
2457 static void
2458 adjust_for_new_dest (rtx_insn *insn)
2459 {
2460 /* For notes, be conservative and simply remove them. */
2461 remove_reg_equal_equiv_notes (insn);
2462
2463 /* The new insn will have a destination that was previously the destination
2464 of an insn just above it. Call distribute_links to make a LOG_LINK from
2465 the next use of that destination. */
2466
2467 rtx set = single_set (insn);
2468 gcc_assert (set);
2469
2470 rtx reg = SET_DEST (set);
2471
2472 while (GET_CODE (reg) == ZERO_EXTRACT
2473 || GET_CODE (reg) == STRICT_LOW_PART
2474 || GET_CODE (reg) == SUBREG)
2475 reg = XEXP (reg, 0);
2476 gcc_assert (REG_P (reg));
2477
2478 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2479
2480 df_insn_rescan (insn);
2481 }
2482
2483 /* Return TRUE if combine can reuse reg X in mode MODE.
2484 ADDED_SETS is nonzero if the original set is still required. */
2485 static bool
2486 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2487 {
2488 unsigned int regno;
2489
2490 if (!REG_P (x))
2491 return false;
2492
2493 /* Don't change between modes with different underlying register sizes,
2494 since this could lead to invalid subregs. */
2495 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2496 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2497 return false;
2498
2499 regno = REGNO (x);
2500 /* Allow hard registers if the new mode is legal, and occupies no more
2501 registers than the old mode. */
2502 if (regno < FIRST_PSEUDO_REGISTER)
2503 return (targetm.hard_regno_mode_ok (regno, mode)
2504 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2505
2506 /* Or a pseudo that is only used once. */
2507 return (regno < reg_n_sets_max
2508 && REG_N_SETS (regno) == 1
2509 && !added_sets
2510 && !REG_USERVAR_P (x));
2511 }
2512
2513
2514 /* Check whether X, the destination of a set, refers to part of
2515 the register specified by REG. */
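/* For example (illustrative RTL only): with REG == (reg:DI 100), a SET_DEST
   of (subreg:SI (reg:DI 100) 0) or
   (strict_low_part (subreg:SI (reg:DI 100) 0)) refers to part of REG,
   whereas a plain (reg:DI 100) destination does not count as a subword.  */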
2516
2517 static bool
2518 reg_subword_p (rtx x, rtx reg)
2519 {
2520 /* Check that reg is an integer mode register. */
2521 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2522 return false;
2523
2524 if (GET_CODE (x) == STRICT_LOW_PART
2525 || GET_CODE (x) == ZERO_EXTRACT)
2526 x = XEXP (x, 0);
2527
2528 return GET_CODE (x) == SUBREG
2529 && SUBREG_REG (x) == reg
2530 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2531 }
2532
2533 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2534 Note that the INSN should be deleted *after* removing dead edges, so
2535 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2536 but not for a (set (pc) (label_ref FOO)). */
2537
2538 static void
2539 update_cfg_for_uncondjump (rtx_insn *insn)
2540 {
2541 basic_block bb = BLOCK_FOR_INSN (insn);
2542 gcc_assert (BB_END (bb) == insn);
2543
2544 purge_dead_edges (bb);
2545
2546 delete_insn (insn);
2547 if (EDGE_COUNT (bb->succs) == 1)
2548 {
2549 rtx_insn *insn;
2550
2551 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2552
2553 /* Remove barriers from the footer if there are any. */
2554 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2555 if (BARRIER_P (insn))
2556 {
2557 if (PREV_INSN (insn))
2558 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2559 else
2560 BB_FOOTER (bb) = NEXT_INSN (insn);
2561 if (NEXT_INSN (insn))
2562 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2563 }
2564 else if (LABEL_P (insn))
2565 break;
2566 }
2567 }
2568
2569 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2570 by an arbitrary number of CLOBBERs. */
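/* A qualifying example for N == 2 (illustrative register numbers):
     (parallel [(set (reg:SI 100) (...))
		(set (reg:CC 101) (...))
		(clobber (reg:SI 102))])
   Note that a (clobber (const_int 0)) element makes this return false.  */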
2571 static bool
2572 is_parallel_of_n_reg_sets (rtx pat, int n)
2573 {
2574 if (GET_CODE (pat) != PARALLEL)
2575 return false;
2576
2577 int len = XVECLEN (pat, 0);
2578 if (len < n)
2579 return false;
2580
2581 int i;
2582 for (i = 0; i < n; i++)
2583 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2584 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2585 return false;
2586 for ( ; i < len; i++)
2587 switch (GET_CODE (XVECEXP (pat, 0, i)))
2588 {
2589 case CLOBBER:
2590 if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2591 return false;
2592 break;
2593 default:
2594 return false;
2595 }
2596 return true;
2597 }
2598
2599 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2600 CLOBBERs), can be split into individual SETs in that order, without
2601 changing semantics. */
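/* For instance (an illustrative sketch):
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:SI 102) (reg:SI 100))])
   cannot be split in order: inside the PARALLEL the second SET reads the
   old value of reg 100, while sequential SETs would read the new one.
   The reg_referenced_p check below rejects exactly this case.  */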
2602 static bool
2603 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2604 {
2605 if (!insn_nothrow_p (insn))
2606 return false;
2607
2608 rtx pat = PATTERN (insn);
2609
2610 int i, j;
2611 for (i = 0; i < n; i++)
2612 {
2613 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2614 return false;
2615
2616 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2617
2618 for (j = i + 1; j < n; j++)
2619 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2620 return false;
2621 }
2622
2623 return true;
2624 }
2625
2626 /* Return whether X is just a single set, with the source
2627 a general_operand. */
2628 static bool
2629 is_just_move (rtx x)
2630 {
2631 if (INSN_P (x))
2632 x = PATTERN (x);
2633
2634 return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
2635 }
2636
2637 /* Callback function to count autoincs. */
2638
2639 static int
2640 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2641 {
2642 (*((int *) arg))++;
2643
2644 return 0;
2645 }
2646
2647 /* Try to combine the insns I0, I1 and I2 into I3.
2648 Here I0, I1 and I2 appear earlier than I3.
2649 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2650 I3.
2651
2652 If we are combining more than two insns and the resulting insn is not
2653 recognized, try splitting it into two insns. If that happens, I2 and I3
2654 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2655 Otherwise, I0, I1 and I2 are pseudo-deleted.
2656
2657 Return 0 if the combination does not work. Then nothing is changed.
2658 If we did the combination, return the insn at which combine should
2659 resume scanning.
2660
2661 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2662 new direct jump instruction.
2663
2664 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2665 been I3 passed to an earlier try_combine within the same basic
2666 block. */
2667
2668 static rtx_insn *
2669 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2670 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2671 {
2672 /* New patterns for I3 and I2, respectively. */
2673 rtx newpat, newi2pat = 0;
2674 rtvec newpat_vec_with_clobbers = 0;
2675 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2676 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2677 dead. */
2678 int added_sets_0, added_sets_1, added_sets_2;
2679 /* Total number of SETs to put into I3. */
2680 int total_sets;
2681 /* Nonzero if I2's or I1's body now appears in I3. */
2682 int i2_is_used = 0, i1_is_used = 0;
2683 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2684 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2685 /* Contains I3 if the destination of I3 is used in its source, which means
2686 that the old life of I3 is being killed. If that usage is placed into
2687 I2 and not in I3, a REG_DEAD note must be made. */
2688 rtx i3dest_killed = 0;
2689 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2690 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2691 /* Copy of SET_SRC of I1 and I0, if needed. */
2692 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2693 /* Set if I2DEST was reused as a scratch register. */
2694 bool i2scratch = false;
2695 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2696 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2697   /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
2698 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2699 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2700 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2701 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2702 /* Notes that must be added to REG_NOTES in I3 and I2. */
2703 rtx new_i3_notes, new_i2_notes;
2704 /* Notes that we substituted I3 into I2 instead of the normal case. */
2705 int i3_subst_into_i2 = 0;
2706 /* Notes that I1, I2 or I3 is a MULT operation. */
2707 int have_mult = 0;
2708 int swap_i2i3 = 0;
2709 int split_i2i3 = 0;
2710 int changed_i3_dest = 0;
2711 bool i2_was_move = false, i3_was_move = false;
2712 int n_auto_inc = 0;
2713
2714 int maxreg;
2715 rtx_insn *temp_insn;
2716 rtx temp_expr;
2717 struct insn_link *link;
2718 rtx other_pat = 0;
2719 rtx new_other_notes;
2720 int i;
2721 scalar_int_mode dest_mode, temp_mode;
2722
2723   /* Immediately return if any of I0, I1 or I2 are the same insn (I3 can
2724 never be). */
2725 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2726 return 0;
2727
2728 /* Only try four-insn combinations when there's high likelihood of
2729 success. Look for simple insns, such as loads of constants or
2730 binary operations involving a constant. */
2731 if (i0)
2732 {
2733 int i;
2734 int ngood = 0;
2735 int nshift = 0;
2736 rtx set0, set3;
2737
2738 if (!flag_expensive_optimizations)
2739 return 0;
2740
2741 for (i = 0; i < 4; i++)
2742 {
2743 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2744 rtx set = single_set (insn);
2745 rtx src;
2746 if (!set)
2747 continue;
2748 src = SET_SRC (set);
2749 if (CONSTANT_P (src))
2750 {
2751 ngood += 2;
2752 break;
2753 }
2754 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2755 ngood++;
2756 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2757 || GET_CODE (src) == LSHIFTRT)
2758 nshift++;
2759 }
2760
2761 	 /* If I0 loads a memory location and I3 sets the same location, then I1 and I2
2762 are likely manipulating its value. Ideally we'll be able to combine
2763 all four insns into a bitfield insertion of some kind.
2764
2765 Note the source in I0 might be inside a sign/zero extension and the
2766 memory modes in I0 and I3 might be different. So extract the address
2767 from the destination of I3 and search for it in the source of I0.
2768
2769 In the event that there's a match but the source/dest do not actually
2770 refer to the same memory, the worst that happens is we try some
2771 combinations that we wouldn't have otherwise. */
2772 if ((set0 = single_set (i0))
2773 /* Ensure the source of SET0 is a MEM, possibly buried inside
2774 an extension. */
2775 && (GET_CODE (SET_SRC (set0)) == MEM
2776 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2777 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2778 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2779 && (set3 = single_set (i3))
2780 /* Ensure the destination of SET3 is a MEM. */
2781 && GET_CODE (SET_DEST (set3)) == MEM
2782 /* Would it be better to extract the base address for the MEM
2783 in SET3 and look for that? I don't have cases where it matters
2784 but I could envision such cases. */
2785 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2786 ngood += 2;
2787
2788 if (ngood < 2 && nshift < 2)
2789 return 0;
2790 }
2791
2792 /* Exit early if one of the insns involved can't be used for
2793 combinations. */
2794 if (CALL_P (i2)
2795 || (i1 && CALL_P (i1))
2796 || (i0 && CALL_P (i0))
2797 || cant_combine_insn_p (i3)
2798 || cant_combine_insn_p (i2)
2799 || (i1 && cant_combine_insn_p (i1))
2800 || (i0 && cant_combine_insn_p (i0))
2801 || likely_spilled_retval_p (i3))
2802 return 0;
2803
2804 combine_attempts++;
2805 undobuf.other_insn = 0;
2806
2807 /* Reset the hard register usage information. */
2808 CLEAR_HARD_REG_SET (newpat_used_regs);
2809
2810 if (dump_file && (dump_flags & TDF_DETAILS))
2811 {
2812 if (i0)
2813 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2814 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2815 else if (i1)
2816 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2817 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2818 else
2819 fprintf (dump_file, "\nTrying %d -> %d:\n",
2820 INSN_UID (i2), INSN_UID (i3));
2821
2822 if (i0)
2823 dump_insn_slim (dump_file, i0);
2824 if (i1)
2825 dump_insn_slim (dump_file, i1);
2826 dump_insn_slim (dump_file, i2);
2827 dump_insn_slim (dump_file, i3);
2828 }
2829
2830 /* If multiple insns feed into one of I2 or I3, they can be in any
2831 order. To simplify the code below, reorder them in sequence. */
2832 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2833 std::swap (i0, i2);
2834 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2835 std::swap (i0, i1);
2836 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2837 std::swap (i1, i2);
2838
2839 added_links_insn = 0;
2840 added_notes_insn = 0;
2841
2842 /* First check for one important special case that the code below will
2843 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2844 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2845 we may be able to replace that destination with the destination of I3.
2846 This occurs in the common code where we compute both a quotient and
2847 remainder into a structure, in which case we want to do the computation
2848 directly into the structure to avoid register-register copies.
2849
2850 Note that this case handles both multiple sets in I2 and also cases
2851 where I2 has a number of CLOBBERs inside the PARALLEL.
2852
2853 We make very conservative checks below and only try to handle the
2854 most common cases of this. For example, we only handle the case
2855 where I2 and I3 are adjacent to avoid making difficult register
2856 usage tests. */
2857
2858 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2859 && REG_P (SET_SRC (PATTERN (i3)))
2860 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2861 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2862 && GET_CODE (PATTERN (i2)) == PARALLEL
2863 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2864 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2865 below would need to check what is inside (and reg_overlap_mentioned_p
2866 doesn't support those codes anyway). Don't allow those destinations;
2867 the resulting insn isn't likely to be recognized anyway. */
2868 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2869 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2870 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2871 SET_DEST (PATTERN (i3)))
2872 && next_active_insn (i2) == i3)
2873 {
2874 rtx p2 = PATTERN (i2);
2875
2876 /* Make sure that the destination of I3,
2877 which we are going to substitute into one output of I2,
2878 is not used within another output of I2. We must avoid making this:
2879 (parallel [(set (mem (reg 69)) ...)
2880 (set (reg 69) ...)])
2881 which is not well-defined as to order of actions.
2882 (Besides, reload can't handle output reloads for this.)
2883
2884 The problem can also happen if the dest of I3 is a memory ref,
2885 if another dest in I2 is an indirect memory ref.
2886
2887 Neither can this PARALLEL be an asm. We do not allow combining
2888 that usually (see can_combine_p), so do not here either. */
2889 bool ok = true;
2890 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2891 {
2892 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2893 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2894 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2895 SET_DEST (XVECEXP (p2, 0, i))))
2896 ok = false;
2897 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2898 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2899 ok = false;
2900 }
2901
2902 if (ok)
2903 for (i = 0; i < XVECLEN (p2, 0); i++)
2904 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2905 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2906 {
2907 combine_merges++;
2908
2909 subst_insn = i3;
2910 subst_low_luid = DF_INSN_LUID (i2);
2911
2912 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2913 i2src = SET_SRC (XVECEXP (p2, 0, i));
2914 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2915 i2dest_killed = dead_or_set_p (i2, i2dest);
2916
2917 /* Replace the dest in I2 with our dest and make the resulting
2918 insn the new pattern for I3. Then skip to where we validate
2919 the pattern. Everything was set up above. */
2920 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2921 newpat = p2;
2922 i3_subst_into_i2 = 1;
2923 goto validate_replacement;
2924 }
2925 }
2926
2927 /* If I2 is setting a pseudo to a constant and I3 is setting some
2928 sub-part of it to another constant, merge them by making a new
2929 constant. */
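  /* A sketch of the transformation (illustrative values, assuming the
     SUBREG below turns out to be the low part):
       I2: (set (reg:DI 100) (const_int 0))
       I3: (set (subreg:SI (reg:DI 100) 0) (const_int 5))
     wi::insert places the SImode constant into the DImode one, and I3's
     new pattern becomes (set (reg:DI 100) (const_int 5)).  */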
2930 if (i1 == 0
2931 && (temp_expr = single_set (i2)) != 0
2932 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2933 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2934 && GET_CODE (PATTERN (i3)) == SET
2935 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2936 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2937 {
2938 rtx dest = SET_DEST (PATTERN (i3));
2939 rtx temp_dest = SET_DEST (temp_expr);
2940 int offset = -1;
2941 int width = 0;
2942
2943 if (GET_CODE (dest) == ZERO_EXTRACT)
2944 {
2945 if (CONST_INT_P (XEXP (dest, 1))
2946 && CONST_INT_P (XEXP (dest, 2))
2947 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2948 &dest_mode))
2949 {
2950 width = INTVAL (XEXP (dest, 1));
2951 offset = INTVAL (XEXP (dest, 2));
2952 dest = XEXP (dest, 0);
2953 if (BITS_BIG_ENDIAN)
2954 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2955 }
2956 }
2957 else
2958 {
2959 if (GET_CODE (dest) == STRICT_LOW_PART)
2960 dest = XEXP (dest, 0);
2961 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2962 {
2963 width = GET_MODE_PRECISION (dest_mode);
2964 offset = 0;
2965 }
2966 }
2967
2968 if (offset >= 0)
2969 {
2970 /* If this is the low part, we're done. */
2971 if (subreg_lowpart_p (dest))
2972 ;
2973 /* Handle the case where inner is twice the size of outer. */
2974 else if (GET_MODE_PRECISION (temp_mode)
2975 == 2 * GET_MODE_PRECISION (dest_mode))
2976 offset += GET_MODE_PRECISION (dest_mode);
2977 /* Otherwise give up for now. */
2978 else
2979 offset = -1;
2980 }
2981
2982 if (offset >= 0)
2983 {
2984 rtx inner = SET_SRC (PATTERN (i3));
2985 rtx outer = SET_SRC (temp_expr);
2986
2987 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2988 rtx_mode_t (inner, dest_mode),
2989 offset, width);
2990
2991 combine_merges++;
2992 subst_insn = i3;
2993 subst_low_luid = DF_INSN_LUID (i2);
2994 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2995 i2dest = temp_dest;
2996 i2dest_killed = dead_or_set_p (i2, i2dest);
2997
2998 /* Replace the source in I2 with the new constant and make the
2999 resulting insn the new pattern for I3. Then skip to where we
3000 validate the pattern. Everything was set up above. */
3001 SUBST (SET_SRC (temp_expr),
3002 immed_wide_int_const (o, temp_mode));
3003
3004 newpat = PATTERN (i2);
3005
3006 /* The dest of I3 has been replaced with the dest of I2. */
3007 changed_i3_dest = 1;
3008 goto validate_replacement;
3009 }
3010 }
3011
3012 /* If we have no I1 and I2 looks like:
3013 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
3014 (set Y OP)])
3015 make up a dummy I1 that is
3016 (set Y OP)
3017 and change I2 to be
3018 (set (reg:CC X) (compare:CC Y (const_int 0)))
3019
3020 (We can ignore any trailing CLOBBERs.)
3021
3022 This undoes a previous combination and allows us to match a branch-and-
3023 decrement insn. */
3024
3025 if (!HAVE_cc0 && i1 == 0
3026 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3027 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
3028 == MODE_CC)
3029 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
3030 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
3031 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
3032 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
3033 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3034 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3035 {
3036 /* We make I1 with the same INSN_UID as I2. This gives it
3037 the same DF_INSN_LUID for value tracking. Our fake I1 will
3038 never appear in the insn stream so giving it the same INSN_UID
3039 as I2 will not cause a problem. */
3040
3041 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3042 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3043 -1, NULL_RTX);
3044 INSN_UID (i1) = INSN_UID (i2);
3045
3046 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3047 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3048 SET_DEST (PATTERN (i1)));
3049 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3050 SUBST_LINK (LOG_LINKS (i2),
3051 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3052 }
3053
3054 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3055 make those two SETs separate I1 and I2 insns, and make an I0 that is
3056 the original I1. */
3057 if (!HAVE_cc0 && i0 == 0
3058 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3059 && can_split_parallel_of_n_reg_sets (i2, 2)
3060 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3061 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3062 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3063 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3064 {
3065 /* If there is no I1, there is no I0 either. */
3066 i0 = i1;
3067
3068 /* We make I1 with the same INSN_UID as I2. This gives it
3069 the same DF_INSN_LUID for value tracking. Our fake I1 will
3070 never appear in the insn stream so giving it the same INSN_UID
3071 as I2 will not cause a problem. */
3072
3073 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3074 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3075 -1, NULL_RTX);
3076 INSN_UID (i1) = INSN_UID (i2);
3077
3078 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3079 }
3080
3081 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
3082 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3083 {
3084 if (dump_file && (dump_flags & TDF_DETAILS))
3085 fprintf (dump_file, "Can't combine i2 into i3\n");
3086 undo_all ();
3087 return 0;
3088 }
3089 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3090 {
3091 if (dump_file && (dump_flags & TDF_DETAILS))
3092 fprintf (dump_file, "Can't combine i1 into i3\n");
3093 undo_all ();
3094 return 0;
3095 }
3096 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3097 {
3098 if (dump_file && (dump_flags & TDF_DETAILS))
3099 fprintf (dump_file, "Can't combine i0 into i3\n");
3100 undo_all ();
3101 return 0;
3102 }
3103
3104 /* Record whether i2 and i3 are trivial moves. */
3105 i2_was_move = is_just_move (i2);
3106 i3_was_move = is_just_move (i3);
3107
3108 /* Record whether I2DEST is used in I2SRC and similarly for the other
3109 cases. Knowing this will help in register status updating below. */
3110 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3111 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3112 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3113 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3114 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3115 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3116 i2dest_killed = dead_or_set_p (i2, i2dest);
3117 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3118 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3119
3120 /* For the earlier insns, determine which of the subsequent ones they
3121 feed. */
3122 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3123 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3124 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3125 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3126 && reg_overlap_mentioned_p (i0dest, i2src))));
3127
3128 /* Ensure that I3's pattern can be the destination of combines. */
3129 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3130 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3131 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3132 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3133 &i3dest_killed))
3134 {
3135 undo_all ();
3136 return 0;
3137 }
3138
3139 /* See if any of the insns is a MULT operation. Unless one is, we will
3140 reject a combination that is, since it must be slower. Be conservative
3141 here. */
3142 if (GET_CODE (i2src) == MULT
3143 || (i1 != 0 && GET_CODE (i1src) == MULT)
3144 || (i0 != 0 && GET_CODE (i0src) == MULT)
3145 || (GET_CODE (PATTERN (i3)) == SET
3146 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3147 have_mult = 1;
3148
3149 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3150 We used to do this EXCEPT in one case: I3 has a post-inc in an
3151 output operand. However, that exception can give rise to insns like
3152 mov r3,(r3)+
3153 which is a famous insn on the PDP-11 where the value of r3 used as the
3154 source was model-dependent. Avoid this sort of thing. */
3155
3156 #if 0
3157 if (!(GET_CODE (PATTERN (i3)) == SET
3158 && REG_P (SET_SRC (PATTERN (i3)))
3159 && MEM_P (SET_DEST (PATTERN (i3)))
3160 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3161 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3162 /* It's not the exception. */
3163 #endif
3164 if (AUTO_INC_DEC)
3165 {
3166 rtx link;
3167 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3168 if (REG_NOTE_KIND (link) == REG_INC
3169 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3170 || (i1 != 0
3171 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3172 {
3173 undo_all ();
3174 return 0;
3175 }
3176 }
3177
3178 /* See if the SETs in I1 or I2 need to be kept around in the merged
3179 instruction: whenever the value set there is still needed past I3.
3180 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3181
3182 For the SET in I1, we have two cases: if I1 and I2 independently feed
3183 into I3, the set in I1 needs to be kept around unless I1DEST dies
3184 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3185 in I1 needs to be kept around unless I1DEST dies or is set in either
3186 I2 or I3. The same considerations apply to I0. */
3187
3188 added_sets_2 = !dead_or_set_p (i3, i2dest);
3189
3190 if (i1)
3191 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3192 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3193 else
3194 added_sets_1 = 0;
3195
3196 if (i0)
3197 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3198 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3199 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3200 && dead_or_set_p (i2, i0dest)));
3201 else
3202 added_sets_0 = 0;
3203
3204 /* We are about to copy insns for the case where they need to be kept
3205 around. Check that they can be copied in the merged instruction. */
3206
3207 if (targetm.cannot_copy_insn_p
3208 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3209 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3210 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3211 {
3212 undo_all ();
3213 return 0;
3214 }
3215
3216 /* Count how many auto_inc expressions there were in the original insns;
3217 we need to have the same number in the resulting patterns. */
3218
3219 if (i0)
3220 for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3221 if (i1)
3222 for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3223 for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3224 for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3225
3226 /* If the set in I2 needs to be kept around, we must make a copy of
3227 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3228 PATTERN (I2), we are only substituting for the original I1DEST, not into
3229 an already-substituted copy. This also prevents making self-referential
3230 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3231 I2DEST. */
3232
3233 if (added_sets_2)
3234 {
3235 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3236 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3237 else
3238 i2pat = copy_rtx (PATTERN (i2));
3239 }
3240
3241 if (added_sets_1)
3242 {
3243 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3244 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3245 else
3246 i1pat = copy_rtx (PATTERN (i1));
3247 }
3248
3249 if (added_sets_0)
3250 {
3251 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3252 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3253 else
3254 i0pat = copy_rtx (PATTERN (i0));
3255 }
3256
3257 combine_merges++;
3258
3259 /* Substitute in the latest insn for the regs set by the earlier ones. */
3260
3261 maxreg = max_reg_num ();
3262
3263 subst_insn = i3;
3264
3265 /* Many machines that don't use CC0 have insns that can both perform an
3266 arithmetic operation and set the condition code. These operations will
3267 be represented as a PARALLEL with the first element of the vector
3268 being a COMPARE of an arithmetic operation with the constant zero.
3269 The second element of the vector will set some pseudo to the result
3270 of the same arithmetic operation. If we simplify the COMPARE, we won't
3271 match such a pattern and so will generate an extra insn. Here we test
3272 for this case, where both the comparison and the operation result are
3273 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3274 I2SRC. Later we will make the PARALLEL that contains I2. */
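  /* The shape of such a PARALLEL (illustrative operands and registers):
       (parallel [(set (reg:CC 100) (compare:CC (plus:SI x y) (const_int 0)))
		  (set (reg:SI 101) (plus:SI x y))])  */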
3275
3276 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3277 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3278 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3279 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3280 {
3281 rtx newpat_dest;
3282 rtx *cc_use_loc = NULL;
3283 rtx_insn *cc_use_insn = NULL;
3284 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3285 machine_mode compare_mode, orig_compare_mode;
3286 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3287 scalar_int_mode mode;
3288
3289 newpat = PATTERN (i3);
3290 newpat_dest = SET_DEST (newpat);
3291 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3292
3293 if (undobuf.other_insn == 0
3294 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3295 &cc_use_insn)))
3296 {
3297 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3298 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3299 compare_code = simplify_compare_const (compare_code, mode,
3300 op0, &op1);
3301 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3302 }
3303
3304 /* Do the rest only if op1 is const0_rtx, which may be the
3305 result of simplification. */
3306 if (op1 == const0_rtx)
3307 {
3308 /* If a single use of the CC is found, prepare to modify it
3309 when SELECT_CC_MODE returns a new CC-class mode, or when
3310 the above simplify_compare_const() returned a new comparison
3311 operator. undobuf.other_insn is assigned the CC use insn
3312 when modifying it. */
3313 if (cc_use_loc)
3314 {
3315 #ifdef SELECT_CC_MODE
3316 machine_mode new_mode
3317 = SELECT_CC_MODE (compare_code, op0, op1);
3318 if (new_mode != orig_compare_mode
3319 && can_change_dest_mode (SET_DEST (newpat),
3320 added_sets_2, new_mode))
3321 {
3322 unsigned int regno = REGNO (newpat_dest);
3323 compare_mode = new_mode;
3324 if (regno < FIRST_PSEUDO_REGISTER)
3325 newpat_dest = gen_rtx_REG (compare_mode, regno);
3326 else
3327 {
3328 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3329 newpat_dest = regno_reg_rtx[regno];
3330 }
3331 }
3332 #endif
3333 /* Cases for modifying the CC-using comparison. */
3334 if (compare_code != orig_compare_code
3335 /* ??? Do we need to verify the zero rtx? */
3336 && XEXP (*cc_use_loc, 1) == const0_rtx)
3337 {
3338 /* Replace cc_use_loc with entire new RTX. */
3339 SUBST (*cc_use_loc,
3340 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3341 newpat_dest, const0_rtx));
3342 undobuf.other_insn = cc_use_insn;
3343 }
3344 else if (compare_mode != orig_compare_mode)
3345 {
3346 /* Just replace the CC reg with a new mode. */
3347 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3348 undobuf.other_insn = cc_use_insn;
3349 }
3350 }
3351
3352 /* Now we modify the current newpat:
3353 First, SET_DEST(newpat) is updated if the CC mode has been
3354 altered. For targets without SELECT_CC_MODE, this should be
3355 optimized away. */
3356 if (compare_mode != orig_compare_mode)
3357 SUBST (SET_DEST (newpat), newpat_dest);
3358 /* This is always done to propagate i2src into newpat. */
3359 SUBST (SET_SRC (newpat),
3360 gen_rtx_COMPARE (compare_mode, op0, op1));
3361 /* Create new version of i2pat if needed; the below PARALLEL
3362 creation needs this to work correctly. */
3363 if (! rtx_equal_p (i2src, op0))
3364 i2pat = gen_rtx_SET (i2dest, op0);
3365 i2_is_used = 1;
3366 }
3367 }
3368
3369 if (i2_is_used == 0)
3370 {
3371 /* It is possible that the source of I2 or I1 may be performing
3372 an unneeded operation, such as a ZERO_EXTEND of something
3373 that is known to have the high part zero. Handle that case
3374 by letting subst look at the inner insns.
3375
3376 Another way to do this would be to have a function that tries
3377 to simplify a single insn instead of merging two or more
3378 insns. We don't do this because of the potential for infinite
3379 loops and because of the extra memory that might be required.
3380 However, doing it the way we are is a bit of a kludge and
3381 doesn't catch all cases.
3382
3383 But only do this if -fexpensive-optimizations since it slows
3384 things down and doesn't usually win.
3385
3386 This is not done in the COMPARE case above because the
3387 unmodified I2PAT is used in the PARALLEL and so a pattern
3388 with a modified I2SRC would not match. */
3389
3390 if (flag_expensive_optimizations)
3391 {
3392 /* Pass pc_rtx so no substitutions are done, just
3393 simplifications. */
3394 if (i1)
3395 {
3396 subst_low_luid = DF_INSN_LUID (i1);
3397 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3398 }
3399
3400 subst_low_luid = DF_INSN_LUID (i2);
3401 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3402 }
3403
3404 n_occurrences = 0; /* `subst' counts here */
3405 subst_low_luid = DF_INSN_LUID (i2);
3406
3407 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3408 copy of I2SRC each time we substitute it, in order to avoid creating
3409 self-referential RTL when we will be substituting I1SRC for I1DEST
3410 later. Likewise if I0 feeds into I2, either directly or indirectly
3411 through I1, and I0DEST is in I0SRC. */
3412 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3413 (i1_feeds_i2_n && i1dest_in_i1src)
3414 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3415 && i0dest_in_i0src));
3416 substed_i2 = 1;
3417
3418 /* Record whether I2's body now appears within I3's body. */
3419 i2_is_used = n_occurrences;
3420 }
3421
3422 /* If we already got a failure, don't try to do more. Otherwise, try to
3423 substitute I1 if we have it. */
3424
3425 if (i1 && GET_CODE (newpat) != CLOBBER)
3426 {
3427 /* Before we can do this substitution, we must redo the test done
3428 above (see detailed comments there) that ensures I1DEST isn't
3429 mentioned in any SETs in NEWPAT that are field assignments. */
3430 if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3431 0, 0, 0))
3432 {
3433 undo_all ();
3434 return 0;
3435 }
3436
3437 n_occurrences = 0;
3438 subst_low_luid = DF_INSN_LUID (i1);
3439
3440 /* If the following substitution will modify I1SRC, make a copy of it
3441 for the case where it is substituted for I1DEST in I2PAT later. */
3442 if (added_sets_2 && i1_feeds_i2_n)
3443 i1src_copy = copy_rtx (i1src);
3444
3445 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3446 copy of I1SRC each time we substitute it, in order to avoid creating
3447 self-referential RTL when we will be substituting I0SRC for I0DEST
3448 later. */
3449 newpat = subst (newpat, i1dest, i1src, 0, 0,
3450 i0_feeds_i1_n && i0dest_in_i0src);
3451 substed_i1 = 1;
3452
3453 /* Record whether I1's body now appears within I3's body. */
3454 i1_is_used = n_occurrences;
3455 }
3456
3457 /* Likewise for I0 if we have it. */
3458
3459 if (i0 && GET_CODE (newpat) != CLOBBER)
3460 {
3461 if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3462 0, 0, 0))
3463 {
3464 undo_all ();
3465 return 0;
3466 }
3467
3468 /* If the following substitution will modify I0SRC, make a copy of it
3469 for the case where it is substituted for I0DEST in I1PAT later. */
3470 if (added_sets_1 && i0_feeds_i1_n)
3471 i0src_copy = copy_rtx (i0src);
3472 /* And a copy for I0DEST in I2PAT substitution. */
3473 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3474 || (i0_feeds_i2_n)))
3475 i0src_copy2 = copy_rtx (i0src);
3476
3477 n_occurrences = 0;
3478 subst_low_luid = DF_INSN_LUID (i0);
3479 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3480 substed_i0 = 1;
3481 }
3482
3483 if (n_auto_inc)
3484 {
3485 int new_n_auto_inc = 0;
3486 for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3487
3488 if (n_auto_inc != new_n_auto_inc)
3489 {
3490 if (dump_file && (dump_flags & TDF_DETAILS))
3491 fprintf (dump_file, "Number of auto_inc expressions changed\n");
3492 undo_all ();
3493 return 0;
3494 }
3495 }
3496
3497 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3498 to count all the ways that I2SRC and I1SRC can be used. */
3499 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3500 && i2_is_used + added_sets_2 > 1)
3501 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3502 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3503 > 1))
3504 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3505 && (n_occurrences + added_sets_0
3506 + (added_sets_1 && i0_feeds_i1_n)
3507 + (added_sets_2 && i0_feeds_i2_n)
3508 > 1))
3509 /* Fail if we tried to make a new register. */
3510 || max_reg_num () != maxreg
3511 /* Fail if we couldn't do something and have a CLOBBER. */
3512 || GET_CODE (newpat) == CLOBBER
3513 /* Fail if this new pattern is a MULT and we didn't have one before
3514 at the outer level. */
3515 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3516 && ! have_mult))
3517 {
3518 undo_all ();
3519 return 0;
3520 }
3521
3522 /* If the actions of the earlier insns must be kept
3523 in addition to substituting them into the latest one,
3524 we must make a new PARALLEL for the latest insn
3525 to hold the additional SETs. */
3526
3527 if (added_sets_0 || added_sets_1 || added_sets_2)
3528 {
3529 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3530 combine_extras++;
3531
3532 if (GET_CODE (newpat) == PARALLEL)
3533 {
3534 rtvec old = XVEC (newpat, 0);
3535 total_sets = XVECLEN (newpat, 0) + extra_sets;
3536 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3537 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3538 sizeof (old->elem[0]) * old->num_elem);
3539 }
3540 else
3541 {
3542 rtx old = newpat;
3543 total_sets = 1 + extra_sets;
3544 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3545 XVECEXP (newpat, 0, 0) = old;
3546 }
3547
3548 if (added_sets_0)
3549 XVECEXP (newpat, 0, --total_sets) = i0pat;
3550
3551 if (added_sets_1)
3552 {
3553 rtx t = i1pat;
3554 if (i0_feeds_i1_n)
3555 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3556
3557 XVECEXP (newpat, 0, --total_sets) = t;
3558 }
3559 if (added_sets_2)
3560 {
3561 rtx t = i2pat;
3562 if (i1_feeds_i2_n)
3563 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3564 i0_feeds_i1_n && i0dest_in_i0src);
3565 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3566 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3567
3568 XVECEXP (newpat, 0, --total_sets) = t;
3569 }
3570 }
3571
3572 validate_replacement:
3573
3574 /* Note which hard regs this insn has as inputs. */
3575 mark_used_regs_combine (newpat);
3576
3577 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3578 consider splitting this pattern, we might need these clobbers. */
3579 if (i1 && GET_CODE (newpat) == PARALLEL
3580 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3581 {
3582 int len = XVECLEN (newpat, 0);
3583
3584 newpat_vec_with_clobbers = rtvec_alloc (len);
3585 for (i = 0; i < len; i++)
3586 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3587 }
3588
3589 /* We have recognized nothing yet. */
3590 insn_code_number = -1;
3591
3592 /* See if this is a PARALLEL of two SETs where one SET's destination is
3593 a register that is unused and this isn't marked as an instruction that
3594 might trap in an EH region. In that case, we just need the other SET.
3595 We prefer this over the PARALLEL.
3596
3597 This can occur when simplifying a divmod insn. We *must* test for this
3598 case here because the code below that splits two independent SETs doesn't
3599 handle this case correctly when it updates the register status.
3600
3601 It's pointless doing this if we originally had two sets, one from
3602 i3, and one from i2. Combining then splitting the parallel results
3603 in the original i2 again plus an invalid insn (which we delete).
3604 The net effect is only to move instructions around, which makes
3605 debug info less accurate.
3606
3607 If the remaining SET came from I2 its destination should not be used
3608 between I2 and I3. See PR82024. */
3609
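/* Illustrative sketch only (hypothetical RTL): a divmod combination can
   yield
     (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
   and if I3 carries a REG_UNUSED note for (reg:SI 101), only the first
   SET needs to survive.  */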
3610 if (!(added_sets_2 && i1 == 0)
3611 && is_parallel_of_n_reg_sets (newpat, 2)
3612 && asm_noperands (newpat) < 0)
3613 {
3614 rtx set0 = XVECEXP (newpat, 0, 0);
3615 rtx set1 = XVECEXP (newpat, 0, 1);
3616 rtx oldpat = newpat;
3617
3618 if (((REG_P (SET_DEST (set1))
3619 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3620 || (GET_CODE (SET_DEST (set1)) == SUBREG
3621 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3622 && insn_nothrow_p (i3)
3623 && !side_effects_p (SET_SRC (set1)))
3624 {
3625 newpat = set0;
3626 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3627 }
3628
3629 else if (((REG_P (SET_DEST (set0))
3630 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3631 || (GET_CODE (SET_DEST (set0)) == SUBREG
3632 && find_reg_note (i3, REG_UNUSED,
3633 SUBREG_REG (SET_DEST (set0)))))
3634 && insn_nothrow_p (i3)
3635 && !side_effects_p (SET_SRC (set0)))
3636 {
3637 rtx dest = SET_DEST (set1);
3638 if (GET_CODE (dest) == SUBREG)
3639 dest = SUBREG_REG (dest);
3640 if (!reg_used_between_p (dest, i2, i3))
3641 {
3642 newpat = set1;
3643 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3644
3645 if (insn_code_number >= 0)
3646 changed_i3_dest = 1;
3647 }
3648 }
3649
3650 if (insn_code_number < 0)
3651 newpat = oldpat;
3652 }
3653
3654 /* Is the result of combination a valid instruction? */
3655 if (insn_code_number < 0)
3656 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3657
3658 /* If we were combining three insns and the result is a simple SET
3659 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3660 insns. There are two ways to do this. It can be split using a
3661 machine-specific method (like when you have an addition of a large
3662 constant) or by combine in the function find_split_point. */
3663
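/* Illustrative sketch only (hypothetical target and constants): on a
   machine whose add-immediate range is limited, the MD splitter might
   turn
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12345)))
   into two additions such as
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12000)))
     (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 0x345)))
   which is the kind of two-insn result handled below.  */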
3664 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3665 && asm_noperands (newpat) < 0)
3666 {
3667 rtx parallel, *split;
3668 rtx_insn *m_split_insn;
3669
3670 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3671 use I2DEST as a scratch register will help. In the latter case,
3672 convert I2DEST to the mode of the source of NEWPAT if we can. */
3673
3674 m_split_insn = combine_split_insns (newpat, i3);
3675
3676 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3677 inputs of NEWPAT. */
3678
3679 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3680 possible to try that as a scratch reg. This would require adding
3681 more code to make it work though. */
3682
3683 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3684 {
3685 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3686
3687 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3688 (temporarily, until we are committed to this instruction
3689 combination) does not work: for example, any call to nonzero_bits
3690 on the register (from a splitter in the MD file, for example)
3691 will get the old information, which is invalid.
3692
3693 Since nowadays we can create registers during combine just fine,
3694 we should just create a new one here, not reuse i2dest. */
3695
3696 /* First try to split using the original register as a
3697 scratch register. */
3698 parallel = gen_rtx_PARALLEL (VOIDmode,
3699 gen_rtvec (2, newpat,
3700 gen_rtx_CLOBBER (VOIDmode,
3701 i2dest)));
3702 m_split_insn = combine_split_insns (parallel, i3);
3703
3704 /* If that didn't work, try changing the mode of I2DEST if
3705 we can. */
3706 if (m_split_insn == 0
3707 && new_mode != GET_MODE (i2dest)
3708 && new_mode != VOIDmode
3709 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3710 {
3711 machine_mode old_mode = GET_MODE (i2dest);
3712 rtx ni2dest;
3713
3714 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3715 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3716 else
3717 {
3718 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3719 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3720 }
3721
3722 parallel = (gen_rtx_PARALLEL
3723 (VOIDmode,
3724 gen_rtvec (2, newpat,
3725 gen_rtx_CLOBBER (VOIDmode,
3726 ni2dest))));
3727 m_split_insn = combine_split_insns (parallel, i3);
3728
3729 if (m_split_insn == 0
3730 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3731 {
3732 struct undo *buf;
3733
3734 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3735 buf = undobuf.undos;
3736 undobuf.undos = buf->next;
3737 buf->next = undobuf.frees;
3738 undobuf.frees = buf;
3739 }
3740 }
3741
3742 i2scratch = m_split_insn != 0;
3743 }
3744
3745 /* If recog_for_combine has discarded clobbers, try to use them
3746 again for the split. */
3747 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3748 {
3749 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3750 m_split_insn = combine_split_insns (parallel, i3);
3751 }
3752
3753 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3754 {
3755 rtx m_split_pat = PATTERN (m_split_insn);
3756 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3757 if (insn_code_number >= 0)
3758 newpat = m_split_pat;
3759 }
3760 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3761 && (next_nonnote_nondebug_insn (i2) == i3
3762 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3763 {
3764 rtx i2set, i3set;
3765 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3766 newi2pat = PATTERN (m_split_insn);
3767
3768 i3set = single_set (NEXT_INSN (m_split_insn));
3769 i2set = single_set (m_split_insn);
3770
3771 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3772
3773 /* If I2 or I3 has multiple SETs, we won't know how to track
3774 register status, so don't use these insns. If I2's destination
3775 is used between I2 and I3, we also can't use these insns. */
3776
3777 if (i2_code_number >= 0 && i2set && i3set
3778 && (next_nonnote_nondebug_insn (i2) == i3
3779 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3780 insn_code_number = recog_for_combine (&newi3pat, i3,
3781 &new_i3_notes);
3782 if (insn_code_number >= 0)
3783 newpat = newi3pat;
3784
3785 /* It is possible that both insns now set the destination of I3.
3786 If so, we must show an extra use of it. */
3787
3788 if (insn_code_number >= 0)
3789 {
3790 rtx new_i3_dest = SET_DEST (i3set);
3791 rtx new_i2_dest = SET_DEST (i2set);
3792
3793 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3794 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3795 || GET_CODE (new_i3_dest) == SUBREG)
3796 new_i3_dest = XEXP (new_i3_dest, 0);
3797
3798 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3799 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3800 || GET_CODE (new_i2_dest) == SUBREG)
3801 new_i2_dest = XEXP (new_i2_dest, 0);
3802
3803 if (REG_P (new_i3_dest)
3804 && REG_P (new_i2_dest)
3805 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3806 && REGNO (new_i2_dest) < reg_n_sets_max)
3807 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3808 }
3809 }
3810
3811 /* If we can split it and use I2DEST, go ahead and see if that
3812 helps things be recognized. Verify that none of the registers
3813 are set between I2 and I3. */
3814 if (insn_code_number < 0
3815 && (split = find_split_point (&newpat, i3, false)) != 0
3816 && (!HAVE_cc0 || REG_P (i2dest))
3817 /* We need I2DEST in the proper mode. If it is a hard register
3818 or the only use of a pseudo, we can change its mode.
3819 Make sure we don't change a hard register to have a mode that
3820 isn't valid for it, or change the number of registers. */
3821 && (GET_MODE (*split) == GET_MODE (i2dest)
3822 || GET_MODE (*split) == VOIDmode
3823 || can_change_dest_mode (i2dest, added_sets_2,
3824 GET_MODE (*split)))
3825 && (next_nonnote_nondebug_insn (i2) == i3
3826 || !modified_between_p (*split, i2, i3))
3827 /* We can't overwrite I2DEST if its value is still used by
3828 NEWPAT. */
3829 && ! reg_referenced_p (i2dest, newpat))
3830 {
3831 rtx newdest = i2dest;
3832 enum rtx_code split_code = GET_CODE (*split);
3833 machine_mode split_mode = GET_MODE (*split);
3834 bool subst_done = false;
3835 newi2pat = NULL_RTX;
3836
3837 i2scratch = true;
3838
3839 /* *SPLIT may be part of I2SRC, so make sure we have the
3840 original expression around for later debug processing.
3841 We should not need I2SRC any more in other cases. */
3842 if (MAY_HAVE_DEBUG_BIND_INSNS)
3843 i2src = copy_rtx (i2src);
3844 else
3845 i2src = NULL;
3846
3847 /* Get NEWDEST as a register in the proper mode. We have already
3848 validated that we can do this. */
3849 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3850 {
3851 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3852 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3853 else
3854 {
3855 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3856 newdest = regno_reg_rtx[REGNO (i2dest)];
3857 }
3858 }
3859
3860 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3861 an ASHIFT. This can occur if it was inside a PLUS and hence
3862 appeared to be a memory address. This is a kludge. */
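/* Illustrative sketch only: for example
     (mult:SI (reg:SI 100) (const_int 8))
   is rewritten as
     (ashift:SI (reg:SI 100) (const_int 3))
   since 8 is 1 << 3.  */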
3863 if (split_code == MULT
3864 && CONST_INT_P (XEXP (*split, 1))
3865 && INTVAL (XEXP (*split, 1)) > 0
3866 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3867 {
3868 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3869 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3870 XEXP (*split, 0), i_rtx));
3871 /* Update split_code because we may not have a multiply
3872 anymore. */
3873 split_code = GET_CODE (*split);
3874 }
3875
3876 /* Similarly for (plus (mult FOO (const_int pow2))). */
3877 if (split_code == PLUS
3878 && GET_CODE (XEXP (*split, 0)) == MULT
3879 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3880 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3881 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3882 {
3883 rtx nsplit = XEXP (*split, 0);
3884 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3885 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3886 XEXP (nsplit, 0),
3887 i_rtx));
3888 /* Update split_code because we may not have a multiply
3889 anymore. */
3890 split_code = GET_CODE (*split);
3891 }
3892
3893 #ifdef INSN_SCHEDULING
3894 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3895 be written as a ZERO_EXTEND. */
3896 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3897 {
3898 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3899 what it really is. */
3900 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3901 == SIGN_EXTEND)
3902 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3903 SUBREG_REG (*split)));
3904 else
3905 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3906 SUBREG_REG (*split)));
3907 }
3908 #endif
3909
3910 /* Attempt to split binary operators using arithmetic identities. */
3911 if (BINARY_P (SET_SRC (newpat))
3912 && split_mode == GET_MODE (SET_SRC (newpat))
3913 && ! side_effects_p (SET_SRC (newpat)))
3914 {
3915 rtx setsrc = SET_SRC (newpat);
3916 machine_mode mode = GET_MODE (setsrc);
3917 enum rtx_code code = GET_CODE (setsrc);
3918 rtx src_op0 = XEXP (setsrc, 0);
3919 rtx src_op1 = XEXP (setsrc, 1);
3920
3921 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3922 if (rtx_equal_p (src_op0, src_op1))
3923 {
3924 newi2pat = gen_rtx_SET (newdest, src_op0);
3925 SUBST (XEXP (setsrc, 0), newdest);
3926 SUBST (XEXP (setsrc, 1), newdest);
3927 subst_done = true;
3928 }
3929 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3930 else if ((code == PLUS || code == MULT)
3931 && GET_CODE (src_op0) == code
3932 && GET_CODE (XEXP (src_op0, 0)) == code
3933 && (INTEGRAL_MODE_P (mode)
3934 || (FLOAT_MODE_P (mode)
3935 && flag_unsafe_math_optimizations)))
3936 {
3937 rtx p = XEXP (XEXP (src_op0, 0), 0);
3938 rtx q = XEXP (XEXP (src_op0, 0), 1);
3939 rtx r = XEXP (src_op0, 1);
3940 rtx s = src_op1;
3941
3942 /* Split both "((X op Y) op X) op Y" and
3943 "((X op Y) op Y) op X" as "T op T" where T is
3944 "X op Y". */
3945 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3946 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3947 {
3948 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3949 SUBST (XEXP (setsrc, 0), newdest);
3950 SUBST (XEXP (setsrc, 1), newdest);
3951 subst_done = true;
3952 }
3953 /* Split "((X op X) op Y) op Y" as "T op T" where
3954 T is "X op Y". */
3955 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3956 {
3957 rtx tmp = simplify_gen_binary (code, mode, p, r);
3958 newi2pat = gen_rtx_SET (newdest, tmp);
3959 SUBST (XEXP (setsrc, 0), newdest);
3960 SUBST (XEXP (setsrc, 1), newdest);
3961 subst_done = true;
3962 }
3963 }
3964 }
3965
3966 if (!subst_done)
3967 {
3968 newi2pat = gen_rtx_SET (newdest, *split);
3969 SUBST (*split, newdest);
3970 }
3971
3972 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3973
3974 /* recog_for_combine might have added CLOBBERs to newi2pat.
3975 Make sure NEWPAT does not depend on the clobbered regs. */
3976 if (GET_CODE (newi2pat) == PARALLEL)
3977 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3978 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3979 {
3980 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3981 if (reg_overlap_mentioned_p (reg, newpat))
3982 {
3983 undo_all ();
3984 return 0;
3985 }
3986 }
3987
3988 /* If the split point was a MULT and we didn't have one before,
3989 don't use one now. */
3990 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3991 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3992 }
3993 }
3994
3995 /* Check for a case where we loaded from memory in a narrow mode and
3996 then sign extended it, but we need both registers. In that case,
3997 we have a PARALLEL with both loads from the same memory location.
3998 We can split this into a load from memory followed by a register-register
3999 copy. This saves at least one insn, more if register allocation can
4000 eliminate the copy.
4001
4002 We cannot do this if the destination of the first assignment is a
4003 condition code register or cc0. We eliminate this case by making sure
4004 the SET_DEST and SET_SRC have the same mode.
4005
4006 We cannot do this if the destination of the second assignment is
4007 a register that we have already assumed is zero-extended. Similarly
4008 for a SUBREG of such a register. */
4009
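/* Illustrative sketch only (hypothetical registers and modes): from
     (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI (reg:SI 101))))
                (set (reg:HI 102) (mem:HI (reg:SI 101)))])
   the code below keeps the extending load as I2 and turns I3 into a
   copy of the low part of (reg:SI 100) into (reg:HI 102).  */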
4010 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
4011 && GET_CODE (newpat) == PARALLEL
4012 && XVECLEN (newpat, 0) == 2
4013 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4014 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
4015 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
4016 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
4017 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4018 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
4019 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
4020 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
4021 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4022 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4023 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
4024 (REG_P (temp_expr)
4025 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4026 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4027 BITS_PER_WORD)
4028 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4029 HOST_BITS_PER_INT)
4030 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4031 != GET_MODE_MASK (word_mode))))
4032 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
4033 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4034 (REG_P (temp_expr)
4035 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4036 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4037 BITS_PER_WORD)
4038 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4039 HOST_BITS_PER_INT)
4040 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4041 != GET_MODE_MASK (word_mode)))))
4042 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4043 SET_SRC (XVECEXP (newpat, 0, 1)))
4044 && ! find_reg_note (i3, REG_UNUSED,
4045 SET_DEST (XVECEXP (newpat, 0, 0))))
4046 {
4047 rtx ni2dest;
4048
4049 newi2pat = XVECEXP (newpat, 0, 0);
4050 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4051 newpat = XVECEXP (newpat, 0, 1);
4052 SUBST (SET_SRC (newpat),
4053 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4054 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4055
4056 if (i2_code_number >= 0)
4057 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4058
4059 if (insn_code_number >= 0)
4060 swap_i2i3 = 1;
4061 }
4062
4063 /* Similarly, check for a case where we have a PARALLEL of two independent
4064 SETs but we started with three insns. In this case, we can do the sets
4065 as two separate insns. This case occurs when some SET allows two
4066 other insns to combine, but the destination of that SET is still live.
4067
4068 Also do this if we started with two insns and (at least) one of the
4069 resulting sets is a noop; this noop will be deleted later.
4070
4071 Also do this if we started with two insns neither of which was a simple
4072 move. */
4073
4074 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4075 && GET_CODE (newpat) == PARALLEL
4076 && XVECLEN (newpat, 0) == 2
4077 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4078 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4079 && (i1
4080 || set_noop_p (XVECEXP (newpat, 0, 0))
4081 || set_noop_p (XVECEXP (newpat, 0, 1))
4082 || (!i2_was_move && !i3_was_move))
4083 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4084 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4085 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4086 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4087 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4088 XVECEXP (newpat, 0, 0))
4089 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4090 XVECEXP (newpat, 0, 1))
4091 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4092 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4093 {
4094 rtx set0 = XVECEXP (newpat, 0, 0);
4095 rtx set1 = XVECEXP (newpat, 0, 1);
4096
4097 /* Normally, it doesn't matter which of the two is done first,
4098 but the one that references cc0 can't be the second, and
4099 one which uses any regs/memory set in between i2 and i3 can't
4100 be first. The PARALLEL might also have been pre-existing in i3,
4101 so we need to make sure that we won't wrongly hoist a SET to i2
4102 that would conflict with a death note present in there, or would
4103 have its dest modified between i2 and i3. */
4104 if (!modified_between_p (SET_SRC (set1), i2, i3)
4105 && !(REG_P (SET_DEST (set1))
4106 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4107 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4108 && find_reg_note (i2, REG_DEAD,
4109 SUBREG_REG (SET_DEST (set1))))
4110 && !modified_between_p (SET_DEST (set1), i2, i3)
4111 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4112 /* If I3 is a jump, ensure that set0 is a jump so that
4113 we do not create invalid RTL. */
4114 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4115 )
4116 {
4117 newi2pat = set1;
4118 newpat = set0;
4119 }
4120 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4121 && !(REG_P (SET_DEST (set0))
4122 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4123 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4124 && find_reg_note (i2, REG_DEAD,
4125 SUBREG_REG (SET_DEST (set0))))
4126 && !modified_between_p (SET_DEST (set0), i2, i3)
4127 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4128 /* If I3 is a jump, ensure that set1 is a jump so that
4129 we do not create invalid RTL. */
4130 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4131 )
4132 {
4133 newi2pat = set0;
4134 newpat = set1;
4135 }
4136 else
4137 {
4138 undo_all ();
4139 return 0;
4140 }
4141
4142 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4143
4144 if (i2_code_number >= 0)
4145 {
4146 /* recog_for_combine might have added CLOBBERs to newi2pat.
4147 Make sure NEWPAT does not depend on the clobbered regs. */
4148 if (GET_CODE (newi2pat) == PARALLEL)
4149 {
4150 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4151 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4152 {
4153 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4154 if (reg_overlap_mentioned_p (reg, newpat))
4155 {
4156 undo_all ();
4157 return 0;
4158 }
4159 }
4160 }
4161
4162 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4163
4164 if (insn_code_number >= 0)
4165 split_i2i3 = 1;
4166 }
4167 }
4168
4169 /* If it still isn't recognized, fail and change things back the way they
4170 were. */
4171 if ((insn_code_number < 0
4172 /* Is the result a reasonable ASM_OPERANDS? */
4173 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4174 {
4175 undo_all ();
4176 return 0;
4177 }
4178
4179 /* If we had to change another insn, make sure it is valid also. */
4180 if (undobuf.other_insn)
4181 {
4182 CLEAR_HARD_REG_SET (newpat_used_regs);
4183
4184 other_pat = PATTERN (undobuf.other_insn);
4185 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4186 &new_other_notes);
4187
4188 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4189 {
4190 undo_all ();
4191 return 0;
4192 }
4193 }
4194
4195 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4196 they are adjacent to each other or not. */
4197 if (HAVE_cc0)
4198 {
4199 rtx_insn *p = prev_nonnote_insn (i3);
4200 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4201 && sets_cc0_p (newi2pat))
4202 {
4203 undo_all ();
4204 return 0;
4205 }
4206 }
4207
4208 /* Only allow this combination if insn_cost reports that the
4209 replacement instructions are cheaper than the originals. */
4210 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4211 {
4212 undo_all ();
4213 return 0;
4214 }
4215
4216 if (MAY_HAVE_DEBUG_BIND_INSNS)
4217 {
4218 struct undo *undo;
4219
4220 for (undo = undobuf.undos; undo; undo = undo->next)
4221 if (undo->kind == UNDO_MODE)
4222 {
4223 rtx reg = *undo->where.r;
4224 machine_mode new_mode = GET_MODE (reg);
4225 machine_mode old_mode = undo->old_contents.m;
4226
4227 /* Temporarily revert mode back. */
4228 adjust_reg_mode (reg, old_mode);
4229
4230 if (reg == i2dest && i2scratch)
4231 {
4232 /* If we used i2dest as a scratch register with a
4233 different mode, substitute it for the original
4234 i2src while its original mode is temporarily
4235 restored, and then clear i2scratch so that we don't
4236 do it again later. */
4237 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4238 this_basic_block);
4239 i2scratch = false;
4240 /* Put back the new mode. */
4241 adjust_reg_mode (reg, new_mode);
4242 }
4243 else
4244 {
4245 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4246 rtx_insn *first, *last;
4247
4248 if (reg == i2dest)
4249 {
4250 first = i2;
4251 last = last_combined_insn;
4252 }
4253 else
4254 {
4255 first = i3;
4256 last = undobuf.other_insn;
4257 gcc_assert (last);
4258 if (DF_INSN_LUID (last)
4259 < DF_INSN_LUID (last_combined_insn))
4260 last = last_combined_insn;
4261 }
4262
4263 /* We're dealing with a reg that changed mode but not
4264 meaning, so we want to turn it into a subreg for
4265 the new mode. However, because of REG sharing and
4266 because its mode had already changed, we have to do
4267 it in two steps. First, replace any debug uses of
4268 reg, with its original mode temporarily restored,
4269 with this copy we have created; then, replace the
4270 copy with the SUBREG of the original shared reg,
4271 once again changed to the new mode. */
4272 propagate_for_debug (first, last, reg, tempreg,
4273 this_basic_block);
4274 adjust_reg_mode (reg, new_mode);
4275 propagate_for_debug (first, last, tempreg,
4276 lowpart_subreg (old_mode, reg, new_mode),
4277 this_basic_block);
4278 }
4279 }
4280 }
4281
4282 /* If we will be able to accept this, we have made a
4283 change to the destination of I3. This requires us to
4284 do a few adjustments. */
4285
4286 if (changed_i3_dest)
4287 {
4288 PATTERN (i3) = newpat;
4289 adjust_for_new_dest (i3);
4290 }
4291
4292 /* We now know that we can do this combination. Merge the insns and
4293 update the status of registers and LOG_LINKS. */
4294
4295 if (undobuf.other_insn)
4296 {
4297 rtx note, next;
4298
4299 PATTERN (undobuf.other_insn) = other_pat;
4300
4301 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4302 ensure that they are still valid. Then add any non-duplicate
4303 notes added by recog_for_combine. */
4304 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4305 {
4306 next = XEXP (note, 1);
4307
4308 if ((REG_NOTE_KIND (note) == REG_DEAD
4309 && !reg_referenced_p (XEXP (note, 0),
4310 PATTERN (undobuf.other_insn)))
4311 || (REG_NOTE_KIND (note) == REG_UNUSED
4312 && !reg_set_p (XEXP (note, 0),
4313 PATTERN (undobuf.other_insn)))
4314 /* Simply drop any REG_EQUAL or REG_EQUIV note, since it may no
4315 longer be valid for other_insn. It may be possible to record
4316 that the CC register has changed and discard only those notes,
4317 but in practice that is an unnecessary complication and gives
4318 no meaningful improvement.
4319
4320 See PR78559. */
4321 || REG_NOTE_KIND (note) == REG_EQUAL
4322 || REG_NOTE_KIND (note) == REG_EQUIV)
4323 remove_note (undobuf.other_insn, note);
4324 }
4325
4326 distribute_notes (new_other_notes, undobuf.other_insn,
4327 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4328 NULL_RTX);
4329 }
4330
4331 if (swap_i2i3)
4332 {
4333 /* I3 now uses what used to be its destination and which is now
4334 I2's destination. This requires us to do a few adjustments. */
4335 PATTERN (i3) = newpat;
4336 adjust_for_new_dest (i3);
4337 }
4338
4339 if (swap_i2i3 || split_i2i3)
4340 {
4341 /* We might need a LOG_LINK from I3 to I2. But then we used to
4342 have one, so we still will.
4343
4344 However, some later insn might be using I2's dest and have
4345 a LOG_LINK pointing at I3. We should change it to point at
4346 I2 instead. */
4347
4348 /* newi2pat is usually a SET here; however, recog_for_combine might
4349 have added some clobbers. */
4350 rtx x = newi2pat;
4351 if (GET_CODE (x) == PARALLEL)
4352 x = XVECEXP (newi2pat, 0, 0);
4353
4354 /* It can only be a SET of a REG or of a SUBREG of a REG. */
4355 unsigned int regno = reg_or_subregno (SET_DEST (x));
4356
4357 bool done = false;
4358 for (rtx_insn *insn = NEXT_INSN (i3);
4359 !done
4360 && insn
4361 && NONDEBUG_INSN_P (insn)
4362 && BLOCK_FOR_INSN (insn) == this_basic_block;
4363 insn = NEXT_INSN (insn))
4364 {
4365 struct insn_link *link;
4366 FOR_EACH_LOG_LINK (link, insn)
4367 if (link->insn == i3 && link->regno == regno)
4368 {
4369 link->insn = i2;
4370 done = true;
4371 break;
4372 }
4373 }
4374 }
4375
4376 {
4377 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4378 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4379 rtx midnotes = 0;
4380 int from_luid;
4381 /* Compute which registers we expect to eliminate. newi2pat may be setting
4382 either i3dest or i2dest, so we must check it. */
4383 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4384 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4385 || !i2dest_killed
4386 ? 0 : i2dest);
4387 /* For i1, we need to compute both local elimination and global
4388 elimination information with respect to newi2pat because i1dest
4389 may be the same as i3dest, in which case newi2pat may be setting
4390 i1dest. Global information is used when distributing REG_DEAD
4391 note for i2 and i3, in which case it does matter if newi2pat sets
4392 i1dest or not.
4393
4394 Local information is used when distributing REG_DEAD note for i1,
4395 in which case it doesn't matter if newi2pat sets i1dest or not.
4396 See PR62151, if we have four insns combination:
4397 i0: r0 <- i0src
4398 i1: r1 <- i1src (using r0)
4399 REG_DEAD (r0)
4400 i2: r0 <- i2src (using r1)
4401 i3: r3 <- i3src (using r0)
4402 ix: using r0
4403 From i1's point of view, r0 is eliminated, no matter if it is set
4404 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4405 should be discarded.
4406
4407 Note local information only affects cases in forms like "I1->I2->I3",
4408 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4409 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4410 i0dest anyway. */
4411 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4412 || !i1dest_killed
4413 ? 0 : i1dest);
4414 rtx elim_i1 = (local_elim_i1 == 0
4415 || (newi2pat && reg_set_p (i1dest, newi2pat))
4416 ? 0 : i1dest);
4417 /* Same case as i1. */
4418 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4419 ? 0 : i0dest);
4420 rtx elim_i0 = (local_elim_i0 == 0
4421 || (newi2pat && reg_set_p (i0dest, newi2pat))
4422 ? 0 : i0dest);
4423
4424 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4425 clear them. */
4426 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4427 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4428 if (i1)
4429 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4430 if (i0)
4431 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4432
4433 /* Ensure that we do not have something that should not be shared but
4434 occurs multiple times in the new insns. Check this by first
4435 resetting all the `used' flags and then copying anything that is shared. */
4436
4437 reset_used_flags (i3notes);
4438 reset_used_flags (i2notes);
4439 reset_used_flags (i1notes);
4440 reset_used_flags (i0notes);
4441 reset_used_flags (newpat);
4442 reset_used_flags (newi2pat);
4443 if (undobuf.other_insn)
4444 reset_used_flags (PATTERN (undobuf.other_insn));
4445
4446 i3notes = copy_rtx_if_shared (i3notes);
4447 i2notes = copy_rtx_if_shared (i2notes);
4448 i1notes = copy_rtx_if_shared (i1notes);
4449 i0notes = copy_rtx_if_shared (i0notes);
4450 newpat = copy_rtx_if_shared (newpat);
4451 newi2pat = copy_rtx_if_shared (newi2pat);
4452 if (undobuf.other_insn)
4453 reset_used_flags (PATTERN (undobuf.other_insn));
4454
4455 INSN_CODE (i3) = insn_code_number;
4456 PATTERN (i3) = newpat;
4457
4458 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4459 {
4460 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4461 link = XEXP (link, 1))
4462 {
4463 if (substed_i2)
4464 {
4465 /* I2SRC must still be meaningful at this point. Some
4466 splitting operations can invalidate I2SRC, but those
4467 operations do not apply to calls. */
4468 gcc_assert (i2src);
4469 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4470 i2dest, i2src);
4471 }
4472 if (substed_i1)
4473 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4474 i1dest, i1src);
4475 if (substed_i0)
4476 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4477 i0dest, i0src);
4478 }
4479 }
4480
4481 if (undobuf.other_insn)
4482 INSN_CODE (undobuf.other_insn) = other_code_number;
4483
4484 /* We had one special case above where I2 had more than one set and
4485 we replaced a destination of one of those sets with the destination
4486 of I3. In that case, we have to update LOG_LINKS of insns later
4487 in this basic block. Note that this (expensive) case is rare.
4488
4489 Also, in this case, we must pretend that all REG_NOTEs for I2
4490 actually came from I3, so that REG_UNUSED notes from I2 will be
4491 properly handled. */
4492
4493 if (i3_subst_into_i2)
4494 {
4495 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4496 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4497 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4498 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4499 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4500 && ! find_reg_note (i2, REG_UNUSED,
4501 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4502 for (temp_insn = NEXT_INSN (i2);
4503 temp_insn
4504 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4505 || BB_HEAD (this_basic_block) != temp_insn);
4506 temp_insn = NEXT_INSN (temp_insn))
4507 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4508 FOR_EACH_LOG_LINK (link, temp_insn)
4509 if (link->insn == i2)
4510 link->insn = i3;
4511
4512 if (i3notes)
4513 {
4514 rtx link = i3notes;
4515 while (XEXP (link, 1))
4516 link = XEXP (link, 1);
4517 XEXP (link, 1) = i2notes;
4518 }
4519 else
4520 i3notes = i2notes;
4521 i2notes = 0;
4522 }
4523
4524 LOG_LINKS (i3) = NULL;
4525 REG_NOTES (i3) = 0;
4526 LOG_LINKS (i2) = NULL;
4527 REG_NOTES (i2) = 0;
4528
4529 if (newi2pat)
4530 {
4531 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4532 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4533 this_basic_block);
4534 INSN_CODE (i2) = i2_code_number;
4535 PATTERN (i2) = newi2pat;
4536 }
4537 else
4538 {
4539 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4540 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4541 this_basic_block);
4542 SET_INSN_DELETED (i2);
4543 }
4544
4545 if (i1)
4546 {
4547 LOG_LINKS (i1) = NULL;
4548 REG_NOTES (i1) = 0;
4549 if (MAY_HAVE_DEBUG_BIND_INSNS)
4550 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4551 this_basic_block);
4552 SET_INSN_DELETED (i1);
4553 }
4554
4555 if (i0)
4556 {
4557 LOG_LINKS (i0) = NULL;
4558 REG_NOTES (i0) = 0;
4559 if (MAY_HAVE_DEBUG_BIND_INSNS)
4560 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4561 this_basic_block);
4562 SET_INSN_DELETED (i0);
4563 }
4564
4565 /* Get death notes for everything that is now used in either I3 or
4566 I2 and used to die in a previous insn. If we built two new
4567 patterns, move from I1 to I2 then I2 to I3 so that we get the
4568 proper movement on registers that I2 modifies. */
4569
4570 if (i0)
4571 from_luid = DF_INSN_LUID (i0);
4572 else if (i1)
4573 from_luid = DF_INSN_LUID (i1);
4574 else
4575 from_luid = DF_INSN_LUID (i2);
4576 if (newi2pat)
4577 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4578 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4579
4580 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4581 if (i3notes)
4582 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4583 elim_i2, elim_i1, elim_i0);
4584 if (i2notes)
4585 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4586 elim_i2, elim_i1, elim_i0);
4587 if (i1notes)
4588 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4589 elim_i2, local_elim_i1, local_elim_i0);
4590 if (i0notes)
4591 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4592 elim_i2, elim_i1, local_elim_i0);
4593 if (midnotes)
4594 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4595 elim_i2, elim_i1, elim_i0);
4596
4597 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4598 know these are REG_UNUSED and want them to go to the desired insn,
4599 so we always pass it as i3. */
4600
4601 if (newi2pat && new_i2_notes)
4602 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4603 NULL_RTX);
4604
4605 if (new_i3_notes)
4606 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4607 NULL_RTX);
4608
4609 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4610 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4611 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4612 in that case, it might delete I2. Similarly for I2 and I1.
4613 Show an additional death due to the REG_DEAD note we make here. If
4614 we discard it in distribute_notes, we will decrement it again. */
4615
4616 if (i3dest_killed)
4617 {
4618 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4619 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4620 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4621 elim_i1, elim_i0);
4622 else
4623 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4624 elim_i2, elim_i1, elim_i0);
4625 }
4626
4627 if (i2dest_in_i2src)
4628 {
4629 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4630 if (newi2pat && reg_set_p (i2dest, newi2pat))
4631 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4632 NULL_RTX, NULL_RTX);
4633 else
4634 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4635 NULL_RTX, NULL_RTX, NULL_RTX);
4636 }
4637
4638 if (i1dest_in_i1src)
4639 {
4640 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4641 if (newi2pat && reg_set_p (i1dest, newi2pat))
4642 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4643 NULL_RTX, NULL_RTX);
4644 else
4645 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4646 NULL_RTX, NULL_RTX, NULL_RTX);
4647 }
4648
4649 if (i0dest_in_i0src)
4650 {
4651 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4652 if (newi2pat && reg_set_p (i0dest, newi2pat))
4653 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4654 NULL_RTX, NULL_RTX);
4655 else
4656 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4657 NULL_RTX, NULL_RTX, NULL_RTX);
4658 }
4659
4660 distribute_links (i3links);
4661 distribute_links (i2links);
4662 distribute_links (i1links);
4663 distribute_links (i0links);
4664
4665 if (REG_P (i2dest))
4666 {
4667 struct insn_link *link;
4668 rtx_insn *i2_insn = 0;
4669 rtx i2_val = 0, set;
4670
4671 /* The insn that used to set this register doesn't exist, and
4672 this life of the register may not exist either. See if one of
4673 I3's links points to an insn that sets I2DEST. If it does,
4674 that is now the last known value for I2DEST. If we don't update
4675 this and I2 set the register to a value that depended on its old
4676 contents, we will get confused. If this insn is used, things
4677 will be set correctly in combine_instructions. */
4678 FOR_EACH_LOG_LINK (link, i3)
4679 if ((set = single_set (link->insn)) != 0
4680 && rtx_equal_p (i2dest, SET_DEST (set)))
4681 i2_insn = link->insn, i2_val = SET_SRC (set);
4682
4683 record_value_for_reg (i2dest, i2_insn, i2_val);
4684
4685 /* If the reg formerly set in I2 died only once and that was in I3,
4686 zero its use count so it won't make `reload' do any work. */
4687 if (! added_sets_2
4688 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4689 && ! i2dest_in_i2src
4690 && REGNO (i2dest) < reg_n_sets_max)
4691 INC_REG_N_SETS (REGNO (i2dest), -1);
4692 }
4693
4694 if (i1 && REG_P (i1dest))
4695 {
4696 struct insn_link *link;
4697 rtx_insn *i1_insn = 0;
4698 rtx i1_val = 0, set;
4699
4700 FOR_EACH_LOG_LINK (link, i3)
4701 if ((set = single_set (link->insn)) != 0
4702 && rtx_equal_p (i1dest, SET_DEST (set)))
4703 i1_insn = link->insn, i1_val = SET_SRC (set);
4704
4705 record_value_for_reg (i1dest, i1_insn, i1_val);
4706
4707 if (! added_sets_1
4708 && ! i1dest_in_i1src
4709 && REGNO (i1dest) < reg_n_sets_max)
4710 INC_REG_N_SETS (REGNO (i1dest), -1);
4711 }
4712
4713 if (i0 && REG_P (i0dest))
4714 {
4715 struct insn_link *link;
4716 rtx_insn *i0_insn = 0;
4717 rtx i0_val = 0, set;
4718
4719 FOR_EACH_LOG_LINK (link, i3)
4720 if ((set = single_set (link->insn)) != 0
4721 && rtx_equal_p (i0dest, SET_DEST (set)))
4722 i0_insn = link->insn, i0_val = SET_SRC (set);
4723
4724 record_value_for_reg (i0dest, i0_insn, i0_val);
4725
4726 if (! added_sets_0
4727 && ! i0dest_in_i0src
4728 && REGNO (i0dest) < reg_n_sets_max)
4729 INC_REG_N_SETS (REGNO (i0dest), -1);
4730 }
4731
4732 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4733 been made to this insn. The order is important, because newi2pat
4734 can affect nonzero_bits of newpat. */
4735 if (newi2pat)
4736 note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4737 note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4738 }
4739
4740 if (undobuf.other_insn != NULL_RTX)
4741 {
4742 if (dump_file)
4743 {
4744 fprintf (dump_file, "modifying other_insn ");
4745 dump_insn_slim (dump_file, undobuf.other_insn);
4746 }
4747 df_insn_rescan (undobuf.other_insn);
4748 }
4749
4750 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4751 {
4752 if (dump_file)
4753 {
4754 fprintf (dump_file, "modifying insn i0 ");
4755 dump_insn_slim (dump_file, i0);
4756 }
4757 df_insn_rescan (i0);
4758 }
4759
4760 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4761 {
4762 if (dump_file)
4763 {
4764 fprintf (dump_file, "modifying insn i1 ");
4765 dump_insn_slim (dump_file, i1);
4766 }
4767 df_insn_rescan (i1);
4768 }
4769
4770 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4771 {
4772 if (dump_file)
4773 {
4774 fprintf (dump_file, "modifying insn i2 ");
4775 dump_insn_slim (dump_file, i2);
4776 }
4777 df_insn_rescan (i2);
4778 }
4779
4780 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4781 {
4782 if (dump_file)
4783 {
4784 fprintf (dump_file, "modifying insn i3 ");
4785 dump_insn_slim (dump_file, i3);
4786 }
4787 df_insn_rescan (i3);
4788 }
4789
4790 /* Set new_direct_jump_p if a new return or simple jump instruction
4791 has been created. Adjust the CFG accordingly. */
4792 if (returnjump_p (i3) || any_uncondjump_p (i3))
4793 {
4794 *new_direct_jump_p = 1;
4795 mark_jump_label (PATTERN (i3), i3, 0);
4796 update_cfg_for_uncondjump (i3);
4797 }
4798
4799 if (undobuf.other_insn != NULL_RTX
4800 && (returnjump_p (undobuf.other_insn)
4801 || any_uncondjump_p (undobuf.other_insn)))
4802 {
4803 *new_direct_jump_p = 1;
4804 update_cfg_for_uncondjump (undobuf.other_insn);
4805 }
4806
4807 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4808 && XEXP (PATTERN (i3), 0) == const1_rtx)
4809 {
4810 basic_block bb = BLOCK_FOR_INSN (i3);
4811 gcc_assert (bb);
4812 remove_edge (split_block (bb, i3));
4813 emit_barrier_after_bb (bb);
4814 *new_direct_jump_p = 1;
4815 }
4816
4817 if (undobuf.other_insn
4818 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4819 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4820 {
4821 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4822 gcc_assert (bb);
4823 remove_edge (split_block (bb, undobuf.other_insn));
4824 emit_barrier_after_bb (bb);
4825 *new_direct_jump_p = 1;
4826 }
4827
4828 /* A no-op might also require cleaning up the CFG, if it comes from
4829 the simplification of a jump. */
4830 if (JUMP_P (i3)
4831 && GET_CODE (newpat) == SET
4832 && SET_SRC (newpat) == pc_rtx
4833 && SET_DEST (newpat) == pc_rtx)
4834 {
4835 *new_direct_jump_p = 1;
4836 update_cfg_for_uncondjump (i3);
4837 }
4838
4839 if (undobuf.other_insn != NULL_RTX
4840 && JUMP_P (undobuf.other_insn)
4841 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4842 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4843 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4844 {
4845 *new_direct_jump_p = 1;
4846 update_cfg_for_uncondjump (undobuf.other_insn);
4847 }
4848
4849 combine_successes++;
4850 undo_commit ();
4851
4852 rtx_insn *ret = newi2pat ? i2 : i3;
4853 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4854 ret = added_links_insn;
4855 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4856 ret = added_notes_insn;
4857
4858 return ret;
4859 }
4860 \f
4861 /* Get a marker for undoing to the current state. */
4862
4863 static void *
4864 get_undo_marker (void)
4865 {
4866 return undobuf.undos;
4867 }
4868
4869 /* Undo the modifications up to the marker. */
4870
4871 static void
4872 undo_to_marker (void *marker)
4873 {
4874 struct undo *undo, *next;
4875
4876 for (undo = undobuf.undos; undo != marker; undo = next)
4877 {
4878 gcc_assert (undo);
4879
4880 next = undo->next;
4881 switch (undo->kind)
4882 {
4883 case UNDO_RTX:
4884 *undo->where.r = undo->old_contents.r;
4885 break;
4886 case UNDO_INT:
4887 *undo->where.i = undo->old_contents.i;
4888 break;
4889 case UNDO_MODE:
4890 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4891 break;
4892 case UNDO_LINKS:
4893 *undo->where.l = undo->old_contents.l;
4894 break;
4895 default:
4896 gcc_unreachable ();
4897 }
4898
4899 undo->next = undobuf.frees;
4900 undobuf.frees = undo;
4901 }
4902
4903 undobuf.undos = (struct undo *) marker;
4904 }
4905
4906 /* Undo all the modifications recorded in undobuf. */
4907
4908 static void
4909 undo_all (void)
4910 {
4911 undo_to_marker (0);
4912 }
4913
4914 /* We've committed to accepting the changes we made. Move all
4915 of the undos to the free list. */
4916
4917 static void
4918 undo_commit (void)
4919 {
4920 struct undo *undo, *next;
4921
4922 for (undo = undobuf.undos; undo; undo = next)
4923 {
4924 next = undo->next;
4925 undo->next = undobuf.frees;
4926 undobuf.frees = undo;
4927 }
4928 undobuf.undos = 0;
4929 }
4930 \f
4931 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4932 where we have an arithmetic expression and return that point. LOC will
4933 be inside INSN.
4934
4935 try_combine will call this function to see if an insn can be split into
4936 two insns. */
4937
4938 static rtx *
4939 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4940 {
4941 rtx x = *loc;
4942 enum rtx_code code = GET_CODE (x);
4943 rtx *split;
4944 unsigned HOST_WIDE_INT len = 0;
4945 HOST_WIDE_INT pos = 0;
4946 int unsignedp = 0;
4947 rtx inner = NULL_RTX;
4948 scalar_int_mode mode, inner_mode;
4949
4950 /* First special-case some codes. */
4951 switch (code)
4952 {
4953 case SUBREG:
4954 #ifdef INSN_SCHEDULING
4955 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4956 point. */
4957 if (MEM_P (SUBREG_REG (x)))
4958 return loc;
4959 #endif
4960 return find_split_point (&SUBREG_REG (x), insn, false);
4961
4962 case MEM:
4963 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4964 using LO_SUM and HIGH. */
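      /* For example, (mem (symbol_ref "x")) would become
	 (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))),
	 and the HIGH expression is returned as the split point.  */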
4965 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4966 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4967 {
4968 machine_mode address_mode = get_address_mode (x);
4969
4970 SUBST (XEXP (x, 0),
4971 gen_rtx_LO_SUM (address_mode,
4972 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4973 XEXP (x, 0)));
4974 return &XEXP (XEXP (x, 0), 0);
4975 }
4976
4977 /* If we have a PLUS whose second operand is a constant and the
4978 address is not valid, perhaps we can split it up using
4979 the machine-specific way to split large constants. We use
4980 the first pseudo-reg (one of the virtual regs) as a placeholder;
4981 it will not remain in the result. */
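      /* For instance, on a target whose addresses allow only small
	 offsets, the machine description might split
	 (plus (reg) (const_int 0x12345678)) into a high-part set of the
	 placeholder followed by a low-part addition; the split point is
	 then taken inside the second insn's source.  */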
4982 if (GET_CODE (XEXP (x, 0)) == PLUS
4983 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4984 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4985 MEM_ADDR_SPACE (x)))
4986 {
4987 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4988 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4989 subst_insn);
4990
4991 /* This should have produced two insns, each of which sets our
4992 placeholder. If the source of the second is a valid address,
4993 we can put both sources together and make a split point
4994 in the middle. */
4995
4996 if (seq
4997 && NEXT_INSN (seq) != NULL_RTX
4998 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4999 && NONJUMP_INSN_P (seq)
5000 && GET_CODE (PATTERN (seq)) == SET
5001 && SET_DEST (PATTERN (seq)) == reg
5002 && ! reg_mentioned_p (reg,
5003 SET_SRC (PATTERN (seq)))
5004 && NONJUMP_INSN_P (NEXT_INSN (seq))
5005 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
5006 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
5007 && memory_address_addr_space_p
5008 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
5009 MEM_ADDR_SPACE (x)))
5010 {
5011 rtx src1 = SET_SRC (PATTERN (seq));
5012 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
5013
5014 /* Replace the placeholder in SRC2 with SRC1. If we can
5015 find where in SRC2 it was placed, that can become our
5016 split point and we can replace this address with SRC2.
5017 Just try two obvious places. */
5018
5019 src2 = replace_rtx (src2, reg, src1);
5020 split = 0;
5021 if (XEXP (src2, 0) == src1)
5022 split = &XEXP (src2, 0);
5023 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
5024 && XEXP (XEXP (src2, 0), 0) == src1)
5025 split = &XEXP (XEXP (src2, 0), 0);
5026
5027 if (split)
5028 {
5029 SUBST (XEXP (x, 0), src2);
5030 return split;
5031 }
5032 }
5033
5034 /* If that didn't work and we have a nested plus, like:
5035 ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
5036	     is a valid address, try to split (REG1 * CONST1).  */
5037 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5038 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5039 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5040 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5041 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5042 0), 0)))))
5043 {
5044 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5045 XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5046 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5047 MEM_ADDR_SPACE (x)))
5048 {
5049 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5050 return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5051 }
5052 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5053 }
5054 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5055 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5056 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5057 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5058 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5059 0), 1)))))
5060 {
5061 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5062 XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5063 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5064 MEM_ADDR_SPACE (x)))
5065 {
5066 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5067 return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5068 }
5069 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5070 }
5071
5072 /* If that didn't work, perhaps the first operand is complex and
5073 needs to be computed separately, so make a split point there.
5074 This will occur on machines that just support REG + CONST
5075 and have a constant moved through some previous computation. */
5076 if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5077 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5078 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5079 return &XEXP (XEXP (x, 0), 0);
5080 }
5081
5082 /* If we have a PLUS whose first operand is complex, try computing it
5083 separately by making a split there. */
5084 if (GET_CODE (XEXP (x, 0)) == PLUS
5085 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5086 MEM_ADDR_SPACE (x))
5087 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5088 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5089 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5090 return &XEXP (XEXP (x, 0), 0);
5091 break;
5092
5093 case SET:
5094 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5095 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5096 we need to put the operand into a register. So split at that
5097 point. */
5098
5099 if (SET_DEST (x) == cc0_rtx
5100 && GET_CODE (SET_SRC (x)) != COMPARE
5101 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5102 && !OBJECT_P (SET_SRC (x))
5103 && ! (GET_CODE (SET_SRC (x)) == SUBREG
5104 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5105 return &SET_SRC (x);
5106
5107 /* See if we can split SET_SRC as it stands. */
5108 split = find_split_point (&SET_SRC (x), insn, true);
5109 if (split && split != &SET_SRC (x))
5110 return split;
5111
5112 /* See if we can split SET_DEST as it stands. */
5113 split = find_split_point (&SET_DEST (x), insn, false);
5114 if (split && split != &SET_DEST (x))
5115 return split;
5116
5117 /* See if this is a bitfield assignment with everything constant. If
5118 so, this is an IOR of an AND, so split it into that. */
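      /* E.g. with BITS_BIG_ENDIAN clear,
	   (set (zero_extract X (const_int 8) (const_int 0)) (const_int 90))
	 becomes
	   (set X (ior (and X (const_int -256)) (const_int 90))),
	 which is then searched for a split point as usual.  */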
5119 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5120 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5121 &inner_mode)
5122 && HWI_COMPUTABLE_MODE_P (inner_mode)
5123 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5124 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5125 && CONST_INT_P (SET_SRC (x))
5126 && ((INTVAL (XEXP (SET_DEST (x), 1))
5127 + INTVAL (XEXP (SET_DEST (x), 2)))
5128 <= GET_MODE_PRECISION (inner_mode))
5129 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5130 {
5131 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5132 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5133 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
5134 rtx dest = XEXP (SET_DEST (x), 0);
5135 unsigned HOST_WIDE_INT mask
5136 = (HOST_WIDE_INT_1U << len) - 1;
5137 rtx or_mask;
5138
5139 if (BITS_BIG_ENDIAN)
5140 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5141
5142 or_mask = gen_int_mode (src << pos, inner_mode);
5143 if (src == mask)
5144 SUBST (SET_SRC (x),
5145 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5146 else
5147 {
5148 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5149 SUBST (SET_SRC (x),
5150 simplify_gen_binary (IOR, inner_mode,
5151 simplify_gen_binary (AND, inner_mode,
5152 dest, negmask),
5153 or_mask));
5154 }
5155
5156 SUBST (SET_DEST (x), dest);
5157
5158 split = find_split_point (&SET_SRC (x), insn, true);
5159 if (split && split != &SET_SRC (x))
5160 return split;
5161 }
5162
5163 /* Otherwise, see if this is an operation that we can split into two.
5164 If so, try to split that. */
5165 code = GET_CODE (SET_SRC (x));
5166
5167 switch (code)
5168 {
5169 case AND:
5170 /* If we are AND'ing with a large constant that is only a single
5171 bit and the result is only being used in a context where we
5172 need to know if it is zero or nonzero, replace it with a bit
5173 extraction. This will avoid the large constant, which might
5174 have taken more than one insn to make. If the constant were
5175 not a valid argument to the AND but took only one insn to make,
5176 this is no worse, but if it took more than one insn, it will
5177 be better. */
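	  /* E.g. if (set D (and R (const_int 4096))) feeds only
	     (ne D (const_int 0)), the AND can be replaced by a one-bit
	     extraction, e.g. (zero_extract R (const_int 1) (const_int 12)),
	     avoiding the large constant.  */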
5178
5179 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5180 && REG_P (XEXP (SET_SRC (x), 0))
5181 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5182 && REG_P (SET_DEST (x))
5183 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5184 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5185 && XEXP (*split, 0) == SET_DEST (x)
5186 && XEXP (*split, 1) == const0_rtx)
5187 {
5188 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5189 XEXP (SET_SRC (x), 0),
5190 pos, NULL_RTX, 1, 1, 0, 0);
5191 if (extraction != 0)
5192 {
5193 SUBST (SET_SRC (x), extraction);
5194 return find_split_point (loc, insn, false);
5195 }
5196 }
5197 break;
5198
5199 case NE:
5200	/* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit
5201	   of X might be nonzero, it can be converted into a NEG of a shift.  */
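	  /* E.g. if bit 3 is the only possibly nonzero bit of X,
	     (ne X (const_int 0)) becomes
	     (neg (lshiftrt X (const_int 3))).  */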
5202 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5203 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5204 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5205 GET_MODE (XEXP (SET_SRC (x),
5206 0))))) >= 1))
5207 {
5208 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5209 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5210 SUBST (SET_SRC (x),
5211 gen_rtx_NEG (mode,
5212 gen_rtx_LSHIFTRT (mode,
5213 XEXP (SET_SRC (x), 0),
5214 pos_rtx)));
5215
5216 split = find_split_point (&SET_SRC (x), insn, true);
5217 if (split && split != &SET_SRC (x))
5218 return split;
5219 }
5220 break;
5221
5222 case SIGN_EXTEND:
5223 inner = XEXP (SET_SRC (x), 0);
5224
5225 /* We can't optimize if either mode is a partial integer
5226 mode as we don't know how many bits are significant
5227 in those modes. */
5228 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5229 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5230 break;
5231
5232 pos = 0;
5233 len = GET_MODE_PRECISION (inner_mode);
5234 unsignedp = 0;
5235 break;
5236
5237 case SIGN_EXTRACT:
5238 case ZERO_EXTRACT:
5239 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5240 &inner_mode)
5241 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5242 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5243 {
5244 inner = XEXP (SET_SRC (x), 0);
5245 len = INTVAL (XEXP (SET_SRC (x), 1));
5246 pos = INTVAL (XEXP (SET_SRC (x), 2));
5247
5248 if (BITS_BIG_ENDIAN)
5249 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5250 unsignedp = (code == ZERO_EXTRACT);
5251 }
5252 break;
5253
5254 default:
5255 break;
5256 }
5257
5258 if (len
5259 && known_subrange_p (pos, len,
5260 0, GET_MODE_PRECISION (GET_MODE (inner)))
5261 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5262 {
5263 /* For unsigned, we have a choice of a shift followed by an
5264 AND or two shifts. Use two shifts for field sizes where the
5265 constant might be too large. We assume here that we can
5266 always at least get 8-bit constants in an AND insn, which is
5267 true for every current RISC. */
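	  /* E.g. extracting 4 unsigned bits at position 3 yields
	     (and (lshiftrt INNER (const_int 3)) (const_int 15)); the
	     signed (or wider) case instead uses an ASHIFT followed by an
	     ASHIFTRT or LSHIFTRT to move the field into place.  */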
5268
5269 if (unsignedp && len <= 8)
5270 {
5271 unsigned HOST_WIDE_INT mask
5272 = (HOST_WIDE_INT_1U << len) - 1;
5273 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5274 SUBST (SET_SRC (x),
5275 gen_rtx_AND (mode,
5276 gen_rtx_LSHIFTRT
5277 (mode, gen_lowpart (mode, inner), pos_rtx),
5278 gen_int_mode (mask, mode)));
5279
5280 split = find_split_point (&SET_SRC (x), insn, true);
5281 if (split && split != &SET_SRC (x))
5282 return split;
5283 }
5284 else
5285 {
5286 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5287 int right_bits = GET_MODE_PRECISION (mode) - len;
5288 SUBST (SET_SRC (x),
5289 gen_rtx_fmt_ee
5290 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5291 gen_rtx_ASHIFT (mode,
5292 gen_lowpart (mode, inner),
5293 gen_int_shift_amount (mode, left_bits)),
5294 gen_int_shift_amount (mode, right_bits)));
5295
5296 split = find_split_point (&SET_SRC (x), insn, true);
5297 if (split && split != &SET_SRC (x))
5298 return split;
5299 }
5300 }
5301
5302 /* See if this is a simple operation with a constant as the second
5303 operand. It might be that this constant is out of range and hence
5304 could be used as a split point. */
5305 if (BINARY_P (SET_SRC (x))
5306 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5307 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5308 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5309 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5310 return &XEXP (SET_SRC (x), 1);
5311
5312 /* Finally, see if this is a simple operation with its first operand
5313 not in a register. The operation might require this operand in a
5314 register, so return it as a split point. We can always do this
5315 because if the first operand were another operation, we would have
5316 already found it as a split point. */
5317 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5318 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5319 return &XEXP (SET_SRC (x), 0);
5320
5321 return 0;
5322
5323 case AND:
5324 case IOR:
5325 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5326 it is better to write this as (not (ior A B)) so we can split it.
5327 Similarly for IOR. */
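      /* E.g. (and (not A) (not B)) is rewritten as (not (ior A B)), and
	 (ior (not A) (not B)) as (not (and A B)), so the inner IOR or AND
	 can then be used as the split point.  */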
5328 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5329 {
5330 SUBST (*loc,
5331 gen_rtx_NOT (GET_MODE (x),
5332 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5333 GET_MODE (x),
5334 XEXP (XEXP (x, 0), 0),
5335 XEXP (XEXP (x, 1), 0))));
5336 return find_split_point (loc, insn, set_src);
5337 }
5338
5339 /* Many RISC machines have a large set of logical insns. If the
5340 second operand is a NOT, put it first so we will try to split the
5341 other operand first. */
5342 if (GET_CODE (XEXP (x, 1)) == NOT)
5343 {
5344 rtx tem = XEXP (x, 0);
5345 SUBST (XEXP (x, 0), XEXP (x, 1));
5346 SUBST (XEXP (x, 1), tem);
5347 }
5348 break;
5349
5350 case PLUS:
5351 case MINUS:
5352 /* Canonicalization can produce (minus A (mult B C)), where C is a
5353 constant. It may be better to try splitting (plus (mult B -C) A)
5354 instead if this isn't a multiply by a power of two. */
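      /* E.g. (minus A (mult B (const_int 3))) is retried as
	 (plus (mult B (const_int -3)) A) before looking for a split
	 point.  */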
5355 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5356 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5357 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5358 {
5359 machine_mode mode = GET_MODE (x);
5360 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5361 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5362 SUBST (*loc, gen_rtx_PLUS (mode,
5363 gen_rtx_MULT (mode,
5364 XEXP (XEXP (x, 1), 0),
5365 gen_int_mode (other_int,
5366 mode)),
5367 XEXP (x, 0)));
5368 return find_split_point (loc, insn, set_src);
5369 }
5370
5371 /* Split at a multiply-accumulate instruction. However if this is
5372 the SET_SRC, we likely do not have such an instruction and it's
5373 worthless to try this split. */
5374 if (!set_src
5375 && (GET_CODE (XEXP (x, 0)) == MULT
5376 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5377 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5378 return loc;
5379
5380 default:
5381 break;
5382 }
5383
5384 /* Otherwise, select our actions depending on our rtx class. */
5385 switch (GET_RTX_CLASS (code))
5386 {
5387 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5388 case RTX_TERNARY:
5389 split = find_split_point (&XEXP (x, 2), insn, false);
5390 if (split)
5391 return split;
5392 /* fall through */
5393 case RTX_BIN_ARITH:
5394 case RTX_COMM_ARITH:
5395 case RTX_COMPARE:
5396 case RTX_COMM_COMPARE:
5397 split = find_split_point (&XEXP (x, 1), insn, false);
5398 if (split)
5399 return split;
5400 /* fall through */
5401 case RTX_UNARY:
5402 /* Some machines have (and (shift ...) ...) insns. If X is not
5403 an AND, but XEXP (X, 0) is, use it as our split point. */
5404 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5405 return &XEXP (x, 0);
5406
5407 split = find_split_point (&XEXP (x, 0), insn, false);
5408 if (split)
5409 return split;
5410 return loc;
5411
5412 default:
5413 /* Otherwise, we don't have a split point. */
5414 return 0;
5415 }
5416 }
5417 \f
5418 /* Throughout X, replace FROM with TO, and return the result.
5419 The result is TO if X is FROM;
5420 otherwise the result is X, but its contents may have been modified.
5421 If they were modified, a record was made in undobuf so that
5422 undo_all will (among other things) return X to its original state.
5423
5424 If the number of changes necessary is too much to record to undo,
5425 the excess changes are not made, so the result is invalid.
5426 The changes already made can still be undone.
5427	undobuf.num_undo is incremented for such changes, so by testing that,
5428	the caller can tell whether the result is valid.
5429
5430 `n_occurrences' is incremented each time FROM is replaced.
5431
5432 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5433
5434 IN_COND is nonzero if we are at the top level of a condition.
5435
5436 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5437 by copying if `n_occurrences' is nonzero. */
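/* For example, substituting FROM = (reg 100) with
   TO = (plus (reg 101) (const_int 4)) rewrites each use of (reg 100) that
   is not itself a SET destination into that PLUS expression, copying TO
   for the second and later occurrences when UNIQUE_COPY is set.  */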
5438
5439 static rtx
5440 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5441 {
5442 enum rtx_code code = GET_CODE (x);
5443 machine_mode op0_mode = VOIDmode;
5444 const char *fmt;
5445 int len, i;
5446 rtx new_rtx;
5447
5448 /* Two expressions are equal if they are identical copies of a shared
5449 RTX or if they are both registers with the same register number
5450 and mode. */
5451
5452 #define COMBINE_RTX_EQUAL_P(X,Y) \
5453 ((X) == (Y) \
5454 || (REG_P (X) && REG_P (Y) \
5455 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5456
5457 /* Do not substitute into clobbers of regs -- this will never result in
5458 valid RTL. */
5459 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5460 return x;
5461
5462 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5463 {
5464 n_occurrences++;
5465 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5466 }
5467
5468 /* If X and FROM are the same register but different modes, they
5469 will not have been seen as equal above. However, the log links code
5470 will make a LOG_LINKS entry for that case. If we do nothing, we
5471 will try to rerecognize our original insn and, when it succeeds,
5472 we will delete the feeding insn, which is incorrect.
5473
5474 So force this insn not to match in this (rare) case. */
5475 if (! in_dest && code == REG && REG_P (from)
5476 && reg_overlap_mentioned_p (x, from))
5477 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5478
5479 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5480 of which may contain things that can be combined. */
5481 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5482 return x;
5483
5484 /* It is possible to have a subexpression appear twice in the insn.
5485 Suppose that FROM is a register that appears within TO.
5486 Then, after that subexpression has been scanned once by `subst',
5487 the second time it is scanned, TO may be found. If we were
5488 to scan TO here, we would find FROM within it and create a
5489 self-referent rtl structure which is completely wrong. */
5490 if (COMBINE_RTX_EQUAL_P (x, to))
5491 return to;
5492
5493 /* Parallel asm_operands need special attention because all of the
5494 inputs are shared across the arms. Furthermore, unsharing the
5495 rtl results in recognition failures. Failure to handle this case
5496 specially can result in circular rtl.
5497
5498 Solve this by doing a normal pass across the first entry of the
5499 parallel, and only processing the SET_DESTs of the subsequent
5500 entries. Ug. */
5501
5502 if (code == PARALLEL
5503 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5504 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5505 {
5506 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5507
5508 /* If this substitution failed, this whole thing fails. */
5509 if (GET_CODE (new_rtx) == CLOBBER
5510 && XEXP (new_rtx, 0) == const0_rtx)
5511 return new_rtx;
5512
5513 SUBST (XVECEXP (x, 0, 0), new_rtx);
5514
5515 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5516 {
5517 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5518
5519 if (!REG_P (dest)
5520 && GET_CODE (dest) != CC0
5521 && GET_CODE (dest) != PC)
5522 {
5523 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5524
5525 /* If this substitution failed, this whole thing fails. */
5526 if (GET_CODE (new_rtx) == CLOBBER
5527 && XEXP (new_rtx, 0) == const0_rtx)
5528 return new_rtx;
5529
5530 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5531 }
5532 }
5533 }
5534 else
5535 {
5536 len = GET_RTX_LENGTH (code);
5537 fmt = GET_RTX_FORMAT (code);
5538
5539 /* We don't need to process a SET_DEST that is a register, CC0,
5540 or PC, so set up to skip this common case. All other cases
5541 where we want to suppress replacing something inside a
5542 SET_SRC are handled via the IN_DEST operand. */
5543 if (code == SET
5544 && (REG_P (SET_DEST (x))
5545 || GET_CODE (SET_DEST (x)) == CC0
5546 || GET_CODE (SET_DEST (x)) == PC))
5547 fmt = "ie";
5548
5549 /* Trying to simplify the operands of a widening MULT is not likely
5550 to create RTL matching a machine insn. */
5551 if (code == MULT
5552 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5553 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5554 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5555 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5556 && REG_P (XEXP (XEXP (x, 0), 0))
5557 && REG_P (XEXP (XEXP (x, 1), 0))
5558 && from == to)
5559 return x;
5560
5561
5562 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5563 constant. */
5564 if (fmt[0] == 'e')
5565 op0_mode = GET_MODE (XEXP (x, 0));
5566
5567 for (i = 0; i < len; i++)
5568 {
5569 if (fmt[i] == 'E')
5570 {
5571 int j;
5572 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5573 {
5574 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5575 {
5576 new_rtx = (unique_copy && n_occurrences
5577 ? copy_rtx (to) : to);
5578 n_occurrences++;
5579 }
5580 else
5581 {
5582 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5583 unique_copy);
5584
5585 /* If this substitution failed, this whole thing
5586 fails. */
5587 if (GET_CODE (new_rtx) == CLOBBER
5588 && XEXP (new_rtx, 0) == const0_rtx)
5589 return new_rtx;
5590 }
5591
5592 SUBST (XVECEXP (x, i, j), new_rtx);
5593 }
5594 }
5595 else if (fmt[i] == 'e')
5596 {
5597 /* If this is a register being set, ignore it. */
5598 new_rtx = XEXP (x, i);
5599 if (in_dest
5600 && i == 0
5601 && (((code == SUBREG || code == ZERO_EXTRACT)
5602 && REG_P (new_rtx))
5603 || code == STRICT_LOW_PART))
5604 ;
5605
5606 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5607 {
5608 /* In general, don't install a subreg involving two
5609 modes not tieable. It can worsen register
5610 allocation, and can even make invalid reload
5611 insns, since the reg inside may need to be copied
5612 from in the outside mode, and that may be invalid
5613 if it is an fp reg copied in integer mode.
5614
5615		     We allow two exceptions to this: it is valid if
5616		     it is inside another SUBREG and the mode of that
5617		     SUBREG and the mode of the inside of TO are
5618		     tieable; and it is valid if X is a SET that copies
5619		     FROM to CC0.
5620
5621 if (GET_CODE (to) == SUBREG
5622 && !targetm.modes_tieable_p (GET_MODE (to),
5623 GET_MODE (SUBREG_REG (to)))
5624 && ! (code == SUBREG
5625 && (targetm.modes_tieable_p
5626 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5627 && (!HAVE_cc0
5628 || (! (code == SET
5629 && i == 1
5630 && XEXP (x, 0) == cc0_rtx))))
5631 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5632
5633 if (code == SUBREG
5634 && REG_P (to)
5635 && REGNO (to) < FIRST_PSEUDO_REGISTER
5636 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5637 SUBREG_BYTE (x),
5638 GET_MODE (x)) < 0)
5639 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5640
5641 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5642 n_occurrences++;
5643 }
5644 else
5645 /* If we are in a SET_DEST, suppress most cases unless we
5646 have gone inside a MEM, in which case we want to
5647 simplify the address. We assume here that things that
5648 are actually part of the destination have their inner
5649 parts in the first expression. This is true for SUBREG,
5650 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5651 things aside from REG and MEM that should appear in a
5652 SET_DEST. */
5653 new_rtx = subst (XEXP (x, i), from, to,
5654 (((in_dest
5655 && (code == SUBREG || code == STRICT_LOW_PART
5656 || code == ZERO_EXTRACT))
5657 || code == SET)
5658 && i == 0),
5659 code == IF_THEN_ELSE && i == 0,
5660 unique_copy);
5661
5662 /* If we found that we will have to reject this combination,
5663 indicate that by returning the CLOBBER ourselves, rather than
5664 an expression containing it. This will speed things up as
5665 well as prevent accidents where two CLOBBERs are considered
5666 to be equal, thus producing an incorrect simplification. */
5667
5668 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5669 return new_rtx;
5670
5671 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5672 {
5673 machine_mode mode = GET_MODE (x);
5674
5675 x = simplify_subreg (GET_MODE (x), new_rtx,
5676 GET_MODE (SUBREG_REG (x)),
5677 SUBREG_BYTE (x));
5678 if (! x)
5679 x = gen_rtx_CLOBBER (mode, const0_rtx);
5680 }
5681 else if (CONST_SCALAR_INT_P (new_rtx)
5682 && (GET_CODE (x) == ZERO_EXTEND
5683 || GET_CODE (x) == SIGN_EXTEND
5684 || GET_CODE (x) == FLOAT
5685 || GET_CODE (x) == UNSIGNED_FLOAT))
5686 {
5687 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5688 new_rtx,
5689 GET_MODE (XEXP (x, 0)));
5690 if (!x)
5691 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5692 }
5693 else
5694 SUBST (XEXP (x, i), new_rtx);
5695 }
5696 }
5697 }
5698
5699 /* Check if we are loading something from the constant pool via float
5700 extension; in this case we would undo compress_float_constant
5701 optimization and degenerate constant load to an immediate value. */
5702 if (GET_CODE (x) == FLOAT_EXTEND
5703 && MEM_P (XEXP (x, 0))
5704 && MEM_READONLY_P (XEXP (x, 0)))
5705 {
5706 rtx tmp = avoid_constant_pool_reference (x);
5707 if (x != tmp)
5708 return x;
5709 }
5710
5711 /* Try to simplify X. If the simplification changed the code, it is likely
5712 that further simplification will help, so loop, but limit the number
5713 of repetitions that will be performed. */
5714
5715 for (i = 0; i < 4; i++)
5716 {
5717 /* If X is sufficiently simple, don't bother trying to do anything
5718 with it. */
5719 if (code != CONST_INT && code != REG && code != CLOBBER)
5720 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5721
5722 if (GET_CODE (x) == code)
5723 break;
5724
5725 code = GET_CODE (x);
5726
5727 /* We no longer know the original mode of operand 0 since we
5728	 have changed the form of X.  */
5729 op0_mode = VOIDmode;
5730 }
5731
5732 return x;
5733 }
5734 \f
5735 /* If X is a commutative operation whose operands are not in the canonical
5736 order, use substitutions to swap them. */
5737
5738 static void
5739 maybe_swap_commutative_operands (rtx x)
5740 {
5741 if (COMMUTATIVE_ARITH_P (x)
5742 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5743 {
5744 rtx temp = XEXP (x, 0);
5745 SUBST (XEXP (x, 0), XEXP (x, 1));
5746 SUBST (XEXP (x, 1), temp);
5747 }
5748 }
5749
5750 /* Simplify X, a piece of RTL. We just operate on the expression at the
5751 outer level; call `subst' to simplify recursively. Return the new
5752 expression.
5753
5754 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5755 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5756 of a condition. */
5757
5758 static rtx
5759 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5760 int in_cond)
5761 {
5762 enum rtx_code code = GET_CODE (x);
5763 machine_mode mode = GET_MODE (x);
5764 scalar_int_mode int_mode;
5765 rtx temp;
5766 int i;
5767
5768 /* If this is a commutative operation, put a constant last and a complex
5769 expression first. We don't need to do this for comparisons here. */
5770 maybe_swap_commutative_operands (x);
5771
5772 /* Try to fold this expression in case we have constants that weren't
5773 present before. */
5774 temp = 0;
5775 switch (GET_RTX_CLASS (code))
5776 {
5777 case RTX_UNARY:
5778 if (op0_mode == VOIDmode)
5779 op0_mode = GET_MODE (XEXP (x, 0));
5780 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5781 break;
5782 case RTX_COMPARE:
5783 case RTX_COMM_COMPARE:
5784 {
5785 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5786 if (cmp_mode == VOIDmode)
5787 {
5788 cmp_mode = GET_MODE (XEXP (x, 1));
5789 if (cmp_mode == VOIDmode)
5790 cmp_mode = op0_mode;
5791 }
5792 temp = simplify_relational_operation (code, mode, cmp_mode,
5793 XEXP (x, 0), XEXP (x, 1));
5794 }
5795 break;
5796 case RTX_COMM_ARITH:
5797 case RTX_BIN_ARITH:
5798 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5799 break;
5800 case RTX_BITFIELD_OPS:
5801 case RTX_TERNARY:
5802 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5803 XEXP (x, 1), XEXP (x, 2));
5804 break;
5805 default:
5806 break;
5807 }
5808
5809 if (temp)
5810 {
5811 x = temp;
5812 code = GET_CODE (temp);
5813 op0_mode = VOIDmode;
5814 mode = GET_MODE (temp);
5815 }
5816
5817 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5818 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5819 things. Check for cases where both arms are testing the same
5820 condition.
5821
5822 Don't do anything if all operands are very simple. */
5823
5824 if ((BINARY_P (x)
5825 && ((!OBJECT_P (XEXP (x, 0))
5826 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5827 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5828 || (!OBJECT_P (XEXP (x, 1))
5829 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5830 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5831 || (UNARY_P (x)
5832 && (!OBJECT_P (XEXP (x, 0))
5833 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5834 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5835 {
5836 rtx cond, true_rtx, false_rtx;
5837
5838 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5839 if (cond != 0
5840 /* If everything is a comparison, what we have is highly unlikely
5841 to be simpler, so don't use it. */
5842 && ! (COMPARISON_P (x)
5843 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5844 /* Similarly, if we end up with one of the expressions the same
5845 as the original, it is certainly not simpler. */
5846 && ! rtx_equal_p (x, true_rtx)
5847 && ! rtx_equal_p (x, false_rtx))
5848 {
5849 rtx cop1 = const0_rtx;
5850 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5851
5852 if (cond_code == NE && COMPARISON_P (cond))
5853 return x;
5854
5855 /* Simplify the alternative arms; this may collapse the true and
5856 false arms to store-flag values. Be careful to use copy_rtx
5857 here since true_rtx or false_rtx might share RTL with x as a
5858 result of the if_then_else_cond call above. */
5859 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5860 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5861
5862 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5863 is unlikely to be simpler. */
5864 if (general_operand (true_rtx, VOIDmode)
5865 && general_operand (false_rtx, VOIDmode))
5866 {
5867 enum rtx_code reversed;
5868
5869 /* Restarting if we generate a store-flag expression will cause
5870 us to loop. Just drop through in this case. */
5871
5872 /* If the result values are STORE_FLAG_VALUE and zero, we can
5873 just make the comparison operation. */
5874 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5875 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5876 cond, cop1);
5877 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5878 && ((reversed = reversed_comparison_code_parts
5879 (cond_code, cond, cop1, NULL))
5880 != UNKNOWN))
5881 x = simplify_gen_relational (reversed, mode, VOIDmode,
5882 cond, cop1);
5883
5884 /* Likewise, we can make the negate of a comparison operation
5885 if the result values are - STORE_FLAG_VALUE and zero. */
5886 else if (CONST_INT_P (true_rtx)
5887 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5888 && false_rtx == const0_rtx)
5889 x = simplify_gen_unary (NEG, mode,
5890 simplify_gen_relational (cond_code,
5891 mode, VOIDmode,
5892 cond, cop1),
5893 mode);
5894 else if (CONST_INT_P (false_rtx)
5895 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5896 && true_rtx == const0_rtx
5897 && ((reversed = reversed_comparison_code_parts
5898 (cond_code, cond, cop1, NULL))
5899 != UNKNOWN))
5900 x = simplify_gen_unary (NEG, mode,
5901 simplify_gen_relational (reversed,
5902 mode, VOIDmode,
5903 cond, cop1),
5904 mode);
5905
5906 code = GET_CODE (x);
5907 op0_mode = VOIDmode;
5908 }
5909 }
5910 }
5911
5912 /* First see if we can apply the inverse distributive law. */
5913 if (code == PLUS || code == MINUS
5914 || code == AND || code == IOR || code == XOR)
5915 {
5916 x = apply_distributive_law (x);
5917 code = GET_CODE (x);
5918 op0_mode = VOIDmode;
5919 }
5920
5921 /* If CODE is an associative operation not otherwise handled, see if we
5922 can associate some operands. This can win if they are constants or
5923 if they are logically related (i.e. (a & b) & a). */
5924 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5925 || code == AND || code == IOR || code == XOR
5926 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5927 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5928 || (flag_associative_math && FLOAT_MODE_P (mode))))
5929 {
5930 if (GET_CODE (XEXP (x, 0)) == code)
5931 {
5932 rtx other = XEXP (XEXP (x, 0), 0);
5933 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5934 rtx inner_op1 = XEXP (x, 1);
5935 rtx inner;
5936
5937 /* Make sure we pass the constant operand if any as the second
5938 one if this is a commutative operation. */
5939 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5940 std::swap (inner_op0, inner_op1);
5941 inner = simplify_binary_operation (code == MINUS ? PLUS
5942 : code == DIV ? MULT
5943 : code,
5944 mode, inner_op0, inner_op1);
5945
5946 /* For commutative operations, try the other pair if that one
5947 didn't simplify. */
5948 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5949 {
5950 other = XEXP (XEXP (x, 0), 1);
5951 inner = simplify_binary_operation (code, mode,
5952 XEXP (XEXP (x, 0), 0),
5953 XEXP (x, 1));
5954 }
5955
5956 if (inner)
5957 return simplify_gen_binary (code, mode, other, inner);
5958 }
5959 }
5960
5961 /* A little bit of algebraic simplification here. */
5962 switch (code)
5963 {
5964 case MEM:
5965 /* Ensure that our address has any ASHIFTs converted to MULT in case
5966 address-recognizing predicates are called later. */
5967 temp = make_compound_operation (XEXP (x, 0), MEM);
5968 SUBST (XEXP (x, 0), temp);
5969 break;
5970
5971 case SUBREG:
5972 if (op0_mode == VOIDmode)
5973 op0_mode = GET_MODE (SUBREG_REG (x));
5974
5975 /* See if this can be moved to simplify_subreg. */
5976 if (CONSTANT_P (SUBREG_REG (x))
5977 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5978 /* Don't call gen_lowpart if the inner mode
5979 is VOIDmode and we cannot simplify it, as SUBREG without
5980 inner mode is invalid. */
5981 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5982 || gen_lowpart_common (mode, SUBREG_REG (x))))
5983 return gen_lowpart (mode, SUBREG_REG (x));
5984
5985 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5986 break;
5987 {
5988 rtx temp;
5989 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5990 SUBREG_BYTE (x));
5991 if (temp)
5992 return temp;
5993
5994 /* If op is known to have all lower bits zero, the result is zero. */
5995 scalar_int_mode int_mode, int_op0_mode;
5996 if (!in_dest
5997 && is_a <scalar_int_mode> (mode, &int_mode)
5998 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5999 && (GET_MODE_PRECISION (int_mode)
6000 < GET_MODE_PRECISION (int_op0_mode))
6001 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
6002 SUBREG_BYTE (x))
6003 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
6004 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
6005 & GET_MODE_MASK (int_mode)) == 0)
6006 && !side_effects_p (SUBREG_REG (x)))
6007 return CONST0_RTX (int_mode);
6008 }
6009
6010 /* Don't change the mode of the MEM if that would change the meaning
6011 of the address. */
6012 if (MEM_P (SUBREG_REG (x))
6013 && (MEM_VOLATILE_P (SUBREG_REG (x))
6014 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
6015 MEM_ADDR_SPACE (SUBREG_REG (x)))))
6016 return gen_rtx_CLOBBER (mode, const0_rtx);
6017
6018 /* Note that we cannot do any narrowing for non-constants since
6019 we might have been counting on using the fact that some bits were
6020 zero. We now do this in the SET. */
6021
6022 break;
6023
6024 case NEG:
6025 temp = expand_compound_operation (XEXP (x, 0));
6026
6027 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
6028 replaced by (lshiftrt X C). This will convert
6029 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
6030
6031 if (GET_CODE (temp) == ASHIFTRT
6032 && CONST_INT_P (XEXP (temp, 1))
6033 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
6034 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
6035 INTVAL (XEXP (temp, 1)));
6036
6037 /* If X has only a single bit that might be nonzero, say, bit I, convert
6038 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
6039 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
6040 (sign_extract X 1 Y). But only do this if TEMP isn't a register
6041 or a SUBREG of one since we'd be making the expression more
6042 complex if it was just a register. */
6043
6044 if (!REG_P (temp)
6045 && ! (GET_CODE (temp) == SUBREG
6046 && REG_P (SUBREG_REG (temp)))
6047 && is_a <scalar_int_mode> (mode, &int_mode)
6048 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
6049 {
6050 rtx temp1 = simplify_shift_const
6051 (NULL_RTX, ASHIFTRT, int_mode,
6052 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
6053 GET_MODE_PRECISION (int_mode) - 1 - i),
6054 GET_MODE_PRECISION (int_mode) - 1 - i);
6055
6056 /* If all we did was surround TEMP with the two shifts, we
6057 haven't improved anything, so don't use it. Otherwise,
6058 we are better off with TEMP1. */
6059 if (GET_CODE (temp1) != ASHIFTRT
6060 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
6061 || XEXP (XEXP (temp1, 0), 0) != temp)
6062 return temp1;
6063 }
6064 break;
6065
6066 case TRUNCATE:
6067 /* We can't handle truncation to a partial integer mode here
6068 because we don't know the real bitsize of the partial
6069 integer mode. */
6070 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
6071 break;
6072
6073 if (HWI_COMPUTABLE_MODE_P (mode))
6074 SUBST (XEXP (x, 0),
6075 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6076 GET_MODE_MASK (mode), 0));
6077
6078 /* We can truncate a constant value and return it. */
6079 {
6080 poly_int64 c;
6081 if (poly_int_rtx_p (XEXP (x, 0), &c))
6082 return gen_int_mode (c, mode);
6083 }
6084
6085 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
6086 whose value is a comparison can be replaced with a subreg if
6087 STORE_FLAG_VALUE permits. */
6088 if (HWI_COMPUTABLE_MODE_P (mode)
6089 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6090 && (temp = get_last_value (XEXP (x, 0)))
6091 && COMPARISON_P (temp))
6092 return gen_lowpart (mode, XEXP (x, 0));
6093 break;
6094
6095 case CONST:
6096 /* (const (const X)) can become (const X). Do it this way rather than
6097 returning the inner CONST since CONST can be shared with a
6098 REG_EQUAL note. */
6099 if (GET_CODE (XEXP (x, 0)) == CONST)
6100 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6101 break;
6102
6103 case LO_SUM:
6104 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
6105 can add in an offset. find_split_point will split this address up
6106 again if it doesn't match. */
6107 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6108 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6109 return XEXP (x, 1);
6110 break;
6111
6112 case PLUS:
6113 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6114 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6115 bit-field and can be replaced by either a sign_extend or a
6116 sign_extract. The `and' may be a zero_extend and the two
6117 <c>, -<c> constants may be reversed. */
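      /* E.g. (plus (xor (and X (const_int 255)) (const_int 128))
		    (const_int -128))
	 is a sign extension of the low 8 bits of X and is rewritten as an
	 ASHIFT by the mode precision minus 8 followed by an ASHIFTRT by
	 the same amount.  */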
6118 if (GET_CODE (XEXP (x, 0)) == XOR
6119 && is_a <scalar_int_mode> (mode, &int_mode)
6120 && CONST_INT_P (XEXP (x, 1))
6121 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6122 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6123 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6124 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6125 && HWI_COMPUTABLE_MODE_P (int_mode)
6126 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6127 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6128 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6129 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6130 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6131 && known_eq ((GET_MODE_PRECISION
6132 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6133 (unsigned int) i + 1))))
6134 return simplify_shift_const
6135 (NULL_RTX, ASHIFTRT, int_mode,
6136 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6137 XEXP (XEXP (XEXP (x, 0), 0), 0),
6138 GET_MODE_PRECISION (int_mode) - (i + 1)),
6139 GET_MODE_PRECISION (int_mode) - (i + 1));
6140
6141 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6142 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6143 the bitsize of the mode - 1. This allows simplification of
6144 "a = (b & 8) == 0;" */
6145 if (XEXP (x, 1) == constm1_rtx
6146 && !REG_P (XEXP (x, 0))
6147 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6148 && REG_P (SUBREG_REG (XEXP (x, 0))))
6149 && is_a <scalar_int_mode> (mode, &int_mode)
6150 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6151 return simplify_shift_const
6152 (NULL_RTX, ASHIFTRT, int_mode,
6153 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6154 gen_rtx_XOR (int_mode, XEXP (x, 0),
6155 const1_rtx),
6156 GET_MODE_PRECISION (int_mode) - 1),
6157 GET_MODE_PRECISION (int_mode) - 1);
6158
6159 /* If we are adding two things that have no bits in common, convert
6160 the addition into an IOR. This will often be further simplified,
6161 for example in cases like ((a & 1) + (a & 2)), which can
6162 become a & 3. */
6163
6164 if (HWI_COMPUTABLE_MODE_P (mode)
6165 && (nonzero_bits (XEXP (x, 0), mode)
6166 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6167 {
6168 /* Try to simplify the expression further. */
6169 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6170 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6171
6172 /* If we could, great. If not, do not go ahead with the IOR
6173 replacement, since PLUS appears in many special purpose
6174 address arithmetic instructions. */
6175 if (GET_CODE (temp) != CLOBBER
6176 && (GET_CODE (temp) != IOR
6177 || ((XEXP (temp, 0) != XEXP (x, 0)
6178 || XEXP (temp, 1) != XEXP (x, 1))
6179 && (XEXP (temp, 0) != XEXP (x, 1)
6180 || XEXP (temp, 1) != XEXP (x, 0)))))
6181 return temp;
6182 }
6183
6184 /* Canonicalize x + x into x << 1. */
6185 if (GET_MODE_CLASS (mode) == MODE_INT
6186 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6187 && !side_effects_p (XEXP (x, 0)))
6188 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6189
6190 break;
6191
6192 case MINUS:
6193 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6194 (and <foo> (const_int pow2-1)) */
6195 if (is_a <scalar_int_mode> (mode, &int_mode)
6196 && GET_CODE (XEXP (x, 1)) == AND
6197 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6198 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6199 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6200 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6201 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6202 break;
6203
6204 case MULT:
6205 /* If we have (mult (plus A B) C), apply the distributive law and then
6206 the inverse distributive law to see if things simplify. This
6207 occurs mostly in addresses, often when unrolling loops. */
6208
6209 if (GET_CODE (XEXP (x, 0)) == PLUS)
6210 {
6211 rtx result = distribute_and_simplify_rtx (x, 0);
6212 if (result)
6213 return result;
6214 }
6215
6216      /* Try to simplify a*(b/c) as (a*b)/c.  */
6217 if (FLOAT_MODE_P (mode) && flag_associative_math
6218 && GET_CODE (XEXP (x, 0)) == DIV)
6219 {
6220 rtx tem = simplify_binary_operation (MULT, mode,
6221 XEXP (XEXP (x, 0), 0),
6222 XEXP (x, 1));
6223 if (tem)
6224 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6225 }
6226 break;
6227
6228 case UDIV:
6229 /* If this is a divide by a power of two, treat it as a shift if
6230 its first operand is a shift. */
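      /* E.g. (udiv (lshiftrt X (const_int 2)) (const_int 4)) is handled
	 as a further logical right shift by 2, typically simplifying to
	 (lshiftrt X (const_int 4)).  */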
6231 if (is_a <scalar_int_mode> (mode, &int_mode)
6232 && CONST_INT_P (XEXP (x, 1))
6233 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6234 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6235 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6236 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6237 || GET_CODE (XEXP (x, 0)) == ROTATE
6238 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6239 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6240 XEXP (x, 0), i);
6241 break;
6242
6243 case EQ: case NE:
6244 case GT: case GTU: case GE: case GEU:
6245 case LT: case LTU: case LE: case LEU:
6246 case UNEQ: case LTGT:
6247 case UNGT: case UNGE:
6248 case UNLT: case UNLE:
6249 case UNORDERED: case ORDERED:
6250 /* If the first operand is a condition code, we can't do anything
6251 with it. */
6252 if (GET_CODE (XEXP (x, 0)) == COMPARE
6253 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6254 && ! CC0_P (XEXP (x, 0))))
6255 {
6256 rtx op0 = XEXP (x, 0);
6257 rtx op1 = XEXP (x, 1);
6258 enum rtx_code new_code;
6259
6260 if (GET_CODE (op0) == COMPARE)
6261 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6262
6263 /* Simplify our comparison, if possible. */
6264 new_code = simplify_comparison (code, &op0, &op1);
6265
6266 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6267 if only the low-order bit is possibly nonzero in X (such as when
6268 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6269 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6270 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6271 (plus X 1).
6272
6273 Remove any ZERO_EXTRACT we made when thinking this was a
6274 comparison. It may now be simpler to use, e.g., an AND. If a
6275 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6276 the call to make_compound_operation in the SET case.
6277
6278 Don't apply these optimizations if the caller would
6279 prefer a comparison rather than a value.
6280 E.g., for the condition in an IF_THEN_ELSE most targets need
6281 an explicit comparison. */
6282
6283 if (in_cond)
6284 ;
6285
6286 else if (STORE_FLAG_VALUE == 1
6287 && new_code == NE
6288 && is_int_mode (mode, &int_mode)
6289 && op1 == const0_rtx
6290 && int_mode == GET_MODE (op0)
6291 && nonzero_bits (op0, int_mode) == 1)
6292 return gen_lowpart (int_mode,
6293 expand_compound_operation (op0));
6294
6295 else if (STORE_FLAG_VALUE == 1
6296 && new_code == NE
6297 && is_int_mode (mode, &int_mode)
6298 && op1 == const0_rtx
6299 && int_mode == GET_MODE (op0)
6300 && (num_sign_bit_copies (op0, int_mode)
6301 == GET_MODE_PRECISION (int_mode)))
6302 {
6303 op0 = expand_compound_operation (op0);
6304 return simplify_gen_unary (NEG, int_mode,
6305 gen_lowpart (int_mode, op0),
6306 int_mode);
6307 }
6308
6309 else if (STORE_FLAG_VALUE == 1
6310 && new_code == EQ
6311 && is_int_mode (mode, &int_mode)
6312 && op1 == const0_rtx
6313 && int_mode == GET_MODE (op0)
6314 && nonzero_bits (op0, int_mode) == 1)
6315 {
6316 op0 = expand_compound_operation (op0);
6317 return simplify_gen_binary (XOR, int_mode,
6318 gen_lowpart (int_mode, op0),
6319 const1_rtx);
6320 }
6321
6322 else if (STORE_FLAG_VALUE == 1
6323 && new_code == EQ
6324 && is_int_mode (mode, &int_mode)
6325 && op1 == const0_rtx
6326 && int_mode == GET_MODE (op0)
6327 && (num_sign_bit_copies (op0, int_mode)
6328 == GET_MODE_PRECISION (int_mode)))
6329 {
6330 op0 = expand_compound_operation (op0);
6331 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6332 }
6333
6334 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6335 those above. */
6336 if (in_cond)
6337 ;
6338
6339 else if (STORE_FLAG_VALUE == -1
6340 && new_code == NE
6341 && is_int_mode (mode, &int_mode)
6342 && op1 == const0_rtx
6343 && int_mode == GET_MODE (op0)
6344 && (num_sign_bit_copies (op0, int_mode)
6345 == GET_MODE_PRECISION (int_mode)))
6346 return gen_lowpart (int_mode, expand_compound_operation (op0));
6347
6348 else if (STORE_FLAG_VALUE == -1
6349 && new_code == NE
6350 && is_int_mode (mode, &int_mode)
6351 && op1 == const0_rtx
6352 && int_mode == GET_MODE (op0)
6353 && nonzero_bits (op0, int_mode) == 1)
6354 {
6355 op0 = expand_compound_operation (op0);
6356 return simplify_gen_unary (NEG, int_mode,
6357 gen_lowpart (int_mode, op0),
6358 int_mode);
6359 }
6360
6361 else if (STORE_FLAG_VALUE == -1
6362 && new_code == EQ
6363 && is_int_mode (mode, &int_mode)
6364 && op1 == const0_rtx
6365 && int_mode == GET_MODE (op0)
6366 && (num_sign_bit_copies (op0, int_mode)
6367 == GET_MODE_PRECISION (int_mode)))
6368 {
6369 op0 = expand_compound_operation (op0);
6370 return simplify_gen_unary (NOT, int_mode,
6371 gen_lowpart (int_mode, op0),
6372 int_mode);
6373 }
6374
6375 /* If X is 0/1, (eq X 0) is X-1. */
6376 else if (STORE_FLAG_VALUE == -1
6377 && new_code == EQ
6378 && is_int_mode (mode, &int_mode)
6379 && op1 == const0_rtx
6380 && int_mode == GET_MODE (op0)
6381 && nonzero_bits (op0, int_mode) == 1)
6382 {
6383 op0 = expand_compound_operation (op0);
6384 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6385 }
6386
6387 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6388 one bit that might be nonzero, we can convert (ne x 0) to
6389 (ashift x c) where C puts the bit in the sign bit. Remove any
6390 AND with STORE_FLAG_VALUE when we are done, since we are only
6391 going to test the sign bit. */
6392 if (new_code == NE
6393 && is_int_mode (mode, &int_mode)
6394 && HWI_COMPUTABLE_MODE_P (int_mode)
6395 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6396 && op1 == const0_rtx
6397 && int_mode == GET_MODE (op0)
6398 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6399 {
6400 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6401 expand_compound_operation (op0),
6402 GET_MODE_PRECISION (int_mode) - 1 - i);
6403 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6404 return XEXP (x, 0);
6405 else
6406 return x;
6407 }
6408
6409 /* If the code changed, return a whole new comparison.
6410 We also need to avoid using SUBST in cases where
6411 simplify_comparison has widened a comparison with a CONST_INT,
6412 since in that case the wider CONST_INT may fail the sanity
6413 checks in do_SUBST. */
6414 if (new_code != code
6415 || (CONST_INT_P (op1)
6416 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6417 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6418 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6419
6420 /* Otherwise, keep this operation, but maybe change its operands.
6421 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6422 SUBST (XEXP (x, 0), op0);
6423 SUBST (XEXP (x, 1), op1);
6424 }
6425 break;
6426
6427 case IF_THEN_ELSE:
6428 return simplify_if_then_else (x);
6429
6430 case ZERO_EXTRACT:
6431 case SIGN_EXTRACT:
6432 case ZERO_EXTEND:
6433 case SIGN_EXTEND:
6434 /* If we are processing SET_DEST, we are done. */
6435 if (in_dest)
6436 return x;
6437
6438 return expand_compound_operation (x);
6439
6440 case SET:
6441 return simplify_set (x);
6442
6443 case AND:
6444 case IOR:
6445 return simplify_logical (x);
6446
6447 case ASHIFT:
6448 case LSHIFTRT:
6449 case ASHIFTRT:
6450 case ROTATE:
6451 case ROTATERT:
6452 /* If this is a shift by a constant amount, simplify it. */
6453 if (CONST_INT_P (XEXP (x, 1)))
6454 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6455 INTVAL (XEXP (x, 1)));
6456
6457 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6458 SUBST (XEXP (x, 1),
6459 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6460 (HOST_WIDE_INT_1U
6461 << exact_log2 (GET_MODE_UNIT_BITSIZE
6462 (GET_MODE (x))))
6463 - 1,
6464 0));
6465 break;
6466
6467 default:
6468 break;
6469 }
6470
6471 return x;
6472 }
6473 \f
6474 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6475
6476 static rtx
6477 simplify_if_then_else (rtx x)
6478 {
6479 machine_mode mode = GET_MODE (x);
6480 rtx cond = XEXP (x, 0);
6481 rtx true_rtx = XEXP (x, 1);
6482 rtx false_rtx = XEXP (x, 2);
6483 enum rtx_code true_code = GET_CODE (cond);
6484 int comparison_p = COMPARISON_P (cond);
6485 rtx temp;
6486 int i;
6487 enum rtx_code false_code;
6488 rtx reversed;
6489 scalar_int_mode int_mode, inner_mode;
6490
6491 /* Simplify storing of the truth value. */
6492 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6493 return simplify_gen_relational (true_code, mode, VOIDmode,
6494 XEXP (cond, 0), XEXP (cond, 1));
6495
6496 /* Also when the truth value has to be reversed. */
6497 if (comparison_p
6498 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6499 && (reversed = reversed_comparison (cond, mode)))
6500 return reversed;
6501
6502 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6503 in it is being compared against certain values. Get the true and false
6504 comparisons and see if that says anything about the value of each arm. */
6505
6506 if (comparison_p
6507 && ((false_code = reversed_comparison_code (cond, NULL))
6508 != UNKNOWN)
6509 && REG_P (XEXP (cond, 0)))
6510 {
6511 HOST_WIDE_INT nzb;
6512 rtx from = XEXP (cond, 0);
6513 rtx true_val = XEXP (cond, 1);
6514 rtx false_val = true_val;
6515 int swapped = 0;
6516
6517 /* If FALSE_CODE is EQ, swap the codes and arms. */
6518
6519 if (false_code == EQ)
6520 {
6521 swapped = 1, true_code = EQ, false_code = NE;
6522 std::swap (true_rtx, false_rtx);
6523 }
6524
6525 scalar_int_mode from_mode;
6526 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6527 {
6528 /* If we are comparing against zero and the expression being
6529 tested has only a single bit that might be nonzero, that is
6530 its value when it is not equal to zero. Similarly if it is
6531 known to be -1 or 0. */
6532 if (true_code == EQ
6533 && true_val == const0_rtx
6534 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6535 {
6536 false_code = EQ;
6537 false_val = gen_int_mode (nzb, from_mode);
6538 }
6539 else if (true_code == EQ
6540 && true_val == const0_rtx
6541 && (num_sign_bit_copies (from, from_mode)
6542 == GET_MODE_PRECISION (from_mode)))
6543 {
6544 false_code = EQ;
6545 false_val = constm1_rtx;
6546 }
6547 }
6548
6549 /* Now simplify an arm if we know the value of the register in the
6550 branch and it is used in the arm. Be careful due to the potential
6551 of locally-shared RTL. */
6552
6553 if (reg_mentioned_p (from, true_rtx))
6554 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6555 from, true_val),
6556 pc_rtx, pc_rtx, 0, 0, 0);
6557 if (reg_mentioned_p (from, false_rtx))
6558 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6559 from, false_val),
6560 pc_rtx, pc_rtx, 0, 0, 0);
6561
6562 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6563 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6564
6565 true_rtx = XEXP (x, 1);
6566 false_rtx = XEXP (x, 2);
6567 true_code = GET_CODE (cond);
6568 }
6569
6570 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6571 reversed, do so to avoid needing two sets of patterns for
6572 subtract-and-branch insns. Similarly if we have a constant in the true
6573 arm, the false arm is the same as the first operand of the comparison, or
6574 the false arm is more complicated than the true arm. */
6575
6576 if (comparison_p
6577 && reversed_comparison_code (cond, NULL) != UNKNOWN
6578 && (true_rtx == pc_rtx
6579 || (CONSTANT_P (true_rtx)
6580 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6581 || true_rtx == const0_rtx
6582 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6583 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6584 && !OBJECT_P (false_rtx))
6585 || reg_mentioned_p (true_rtx, false_rtx)
6586 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6587 {
6588 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6589 SUBST (XEXP (x, 1), false_rtx);
6590 SUBST (XEXP (x, 2), true_rtx);
6591
6592 std::swap (true_rtx, false_rtx);
6593 cond = XEXP (x, 0);
6594
6595 /* It is possible that the conditional has been simplified out. */
6596 true_code = GET_CODE (cond);
6597 comparison_p = COMPARISON_P (cond);
6598 }
6599
6600 /* If the two arms are identical, we don't need the comparison. */
6601
6602 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6603 return true_rtx;
6604
6605 /* Convert a == b ? b : a to "a". */
6606 if (true_code == EQ && ! side_effects_p (cond)
6607 && !HONOR_NANS (mode)
6608 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6609 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6610 return false_rtx;
6611 else if (true_code == NE && ! side_effects_p (cond)
6612 && !HONOR_NANS (mode)
6613 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6614 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6615 return true_rtx;
6616
6617 /* Look for cases where we have (abs x) or (neg (abs X)). */
6618
6619 if (GET_MODE_CLASS (mode) == MODE_INT
6620 && comparison_p
6621 && XEXP (cond, 1) == const0_rtx
6622 && GET_CODE (false_rtx) == NEG
6623 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6624 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6625 && ! side_effects_p (true_rtx))
6626 switch (true_code)
6627 {
6628 case GT:
6629 case GE:
6630 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6631 case LT:
6632 case LE:
6633 return
6634 simplify_gen_unary (NEG, mode,
6635 simplify_gen_unary (ABS, mode, true_rtx, mode),
6636 mode);
6637 default:
6638 break;
6639 }
6640
6641 /* Look for MIN or MAX. */
6642
6643 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6644 && comparison_p
6645 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6646 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6647 && ! side_effects_p (cond))
6648 switch (true_code)
6649 {
6650 case GE:
6651 case GT:
6652 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6653 case LE:
6654 case LT:
6655 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6656 case GEU:
6657 case GTU:
6658 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6659 case LEU:
6660 case LTU:
6661 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6662 default:
6663 break;
6664 }
6665
6666 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6667 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6668 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6669 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6670 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6671 neither 1 nor -1, but it isn't worth checking for. */
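/* An illustrative sketch, assuming STORE_FLAG_VALUE == 1 and placeholder
   operands A, B and Z:
     (if_then_else (ne A B) (plus Z (const_int 4)) Z)
   can be rewritten as
     (plus Z (mult (ne A B) (const_int 4)))
   since PLUS is an identity when its second operand is zero. */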
6672
6673 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6674 && comparison_p
6675 && is_int_mode (mode, &int_mode)
6676 && ! side_effects_p (x))
6677 {
6678 rtx t = make_compound_operation (true_rtx, SET);
6679 rtx f = make_compound_operation (false_rtx, SET);
6680 rtx cond_op0 = XEXP (cond, 0);
6681 rtx cond_op1 = XEXP (cond, 1);
6682 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6683 scalar_int_mode m = int_mode;
6684 rtx z = 0, c1 = NULL_RTX;
6685
6686 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6687 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6688 || GET_CODE (t) == ASHIFT
6689 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6690 && rtx_equal_p (XEXP (t, 0), f))
6691 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6692
6693 /* If an identity-zero op is commutative, check whether there
6694 would be a match if we swapped the operands. */
6695 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6696 || GET_CODE (t) == XOR)
6697 && rtx_equal_p (XEXP (t, 1), f))
6698 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6699 else if (GET_CODE (t) == SIGN_EXTEND
6700 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6701 && (GET_CODE (XEXP (t, 0)) == PLUS
6702 || GET_CODE (XEXP (t, 0)) == MINUS
6703 || GET_CODE (XEXP (t, 0)) == IOR
6704 || GET_CODE (XEXP (t, 0)) == XOR
6705 || GET_CODE (XEXP (t, 0)) == ASHIFT
6706 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6707 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6708 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6709 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6710 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6711 && (num_sign_bit_copies (f, GET_MODE (f))
6712 > (unsigned int)
6713 (GET_MODE_PRECISION (int_mode)
6714 - GET_MODE_PRECISION (inner_mode))))
6715 {
6716 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6717 extend_op = SIGN_EXTEND;
6718 m = inner_mode;
6719 }
6720 else if (GET_CODE (t) == SIGN_EXTEND
6721 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6722 && (GET_CODE (XEXP (t, 0)) == PLUS
6723 || GET_CODE (XEXP (t, 0)) == IOR
6724 || GET_CODE (XEXP (t, 0)) == XOR)
6725 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6726 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6727 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6728 && (num_sign_bit_copies (f, GET_MODE (f))
6729 > (unsigned int)
6730 (GET_MODE_PRECISION (int_mode)
6731 - GET_MODE_PRECISION (inner_mode))))
6732 {
6733 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6734 extend_op = SIGN_EXTEND;
6735 m = inner_mode;
6736 }
6737 else if (GET_CODE (t) == ZERO_EXTEND
6738 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6739 && (GET_CODE (XEXP (t, 0)) == PLUS
6740 || GET_CODE (XEXP (t, 0)) == MINUS
6741 || GET_CODE (XEXP (t, 0)) == IOR
6742 || GET_CODE (XEXP (t, 0)) == XOR
6743 || GET_CODE (XEXP (t, 0)) == ASHIFT
6744 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6745 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6746 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6747 && HWI_COMPUTABLE_MODE_P (int_mode)
6748 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6749 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6750 && ((nonzero_bits (f, GET_MODE (f))
6751 & ~GET_MODE_MASK (inner_mode))
6752 == 0))
6753 {
6754 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6755 extend_op = ZERO_EXTEND;
6756 m = inner_mode;
6757 }
6758 else if (GET_CODE (t) == ZERO_EXTEND
6759 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6760 && (GET_CODE (XEXP (t, 0)) == PLUS
6761 || GET_CODE (XEXP (t, 0)) == IOR
6762 || GET_CODE (XEXP (t, 0)) == XOR)
6763 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6764 && HWI_COMPUTABLE_MODE_P (int_mode)
6765 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6766 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6767 && ((nonzero_bits (f, GET_MODE (f))
6768 & ~GET_MODE_MASK (inner_mode))
6769 == 0))
6770 {
6771 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6772 extend_op = ZERO_EXTEND;
6773 m = inner_mode;
6774 }
6775
6776 if (z)
6777 {
6778 machine_mode cm = m;
6779 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6780 && GET_MODE (c1) != VOIDmode)
6781 cm = GET_MODE (c1);
6782 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6783 cond_op0, cond_op1),
6784 pc_rtx, pc_rtx, 0, 0, 0);
6785 temp = simplify_gen_binary (MULT, cm, temp,
6786 simplify_gen_binary (MULT, cm, c1,
6787 const_true_rtx));
6788 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6789 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6790
6791 if (extend_op != UNKNOWN)
6792 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6793
6794 return temp;
6795 }
6796 }
6797
6798 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6799 1 and C1 is a single bit, or A is known to be 0 or -1 and C1 is the
6800 negation of a single bit, we can convert this operation to a shift. We
6801 can actually do this more generally, but it doesn't seem worth it. */
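/* An illustrative example (placeholder register A): if A is known to be
   0 or 1, then
     (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
   is just (ashift A (const_int 3)); at the source level this is roughly
   "a != 0 ? 8 : 0" becoming "a << 3". */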
6802
6803 if (true_code == NE
6804 && is_a <scalar_int_mode> (mode, &int_mode)
6805 && XEXP (cond, 1) == const0_rtx
6806 && false_rtx == const0_rtx
6807 && CONST_INT_P (true_rtx)
6808 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6809 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6810 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6811 == GET_MODE_PRECISION (int_mode))
6812 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6813 return
6814 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6815 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6816
6817 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6818 non-zero bit in A is C1. */
6819 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6820 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6821 && is_a <scalar_int_mode> (mode, &int_mode)
6822 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6823 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6824 == nonzero_bits (XEXP (cond, 0), inner_mode)
6825 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6826 {
6827 rtx val = XEXP (cond, 0);
6828 if (inner_mode == int_mode)
6829 return val;
6830 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6831 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6832 }
6833
6834 return x;
6835 }
6836 \f
6837 /* Simplify X, a SET expression. Return the new expression. */
6838
6839 static rtx
6840 simplify_set (rtx x)
6841 {
6842 rtx src = SET_SRC (x);
6843 rtx dest = SET_DEST (x);
6844 machine_mode mode
6845 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6846 rtx_insn *other_insn;
6847 rtx *cc_use;
6848 scalar_int_mode int_mode;
6849
6850 /* (set (pc) (return)) gets written as (return). */
6851 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6852 return src;
6853
6854 /* Now that we know for sure which bits of SRC we are using, see if we can
6855 simplify the expression for the object knowing that we only need the
6856 low-order bits. */
6857
6858 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6859 {
6860 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6861 SUBST (SET_SRC (x), src);
6862 }
6863
6864 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6865 the comparison result and try to simplify it unless we already have used
6866 undobuf.other_insn. */
6867 if ((GET_MODE_CLASS (mode) == MODE_CC
6868 || GET_CODE (src) == COMPARE
6869 || CC0_P (dest))
6870 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6871 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6872 && COMPARISON_P (*cc_use)
6873 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6874 {
6875 enum rtx_code old_code = GET_CODE (*cc_use);
6876 enum rtx_code new_code;
6877 rtx op0, op1, tmp;
6878 int other_changed = 0;
6879 rtx inner_compare = NULL_RTX;
6880 machine_mode compare_mode = GET_MODE (dest);
6881
6882 if (GET_CODE (src) == COMPARE)
6883 {
6884 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6885 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6886 {
6887 inner_compare = op0;
6888 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6889 }
6890 }
6891 else
6892 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6893
6894 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6895 op0, op1);
6896 if (!tmp)
6897 new_code = old_code;
6898 else if (!CONSTANT_P (tmp))
6899 {
6900 new_code = GET_CODE (tmp);
6901 op0 = XEXP (tmp, 0);
6902 op1 = XEXP (tmp, 1);
6903 }
6904 else
6905 {
6906 rtx pat = PATTERN (other_insn);
6907 undobuf.other_insn = other_insn;
6908 SUBST (*cc_use, tmp);
6909
6910 /* Attempt to simplify CC user. */
6911 if (GET_CODE (pat) == SET)
6912 {
6913 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6914 if (new_rtx != NULL_RTX)
6915 SUBST (SET_SRC (pat), new_rtx);
6916 }
6917
6918 /* Convert X into a no-op move. */
6919 SUBST (SET_DEST (x), pc_rtx);
6920 SUBST (SET_SRC (x), pc_rtx);
6921 return x;
6922 }
6923
6924 /* Simplify our comparison, if possible. */
6925 new_code = simplify_comparison (new_code, &op0, &op1);
6926
6927 #ifdef SELECT_CC_MODE
6928 /* If this machine has CC modes other than CCmode, check to see if we
6929 need to use a different CC mode here. */
6930 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6931 compare_mode = GET_MODE (op0);
6932 else if (inner_compare
6933 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6934 && new_code == old_code
6935 && op0 == XEXP (inner_compare, 0)
6936 && op1 == XEXP (inner_compare, 1))
6937 compare_mode = GET_MODE (inner_compare);
6938 else
6939 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6940
6941 /* If the mode changed, we have to change SET_DEST, the mode in the
6942 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6943 a hard register, just build new versions with the proper mode. If it
6944 is a pseudo, we lose unless it is the only time we set the pseudo, in
6945 which case we can safely change its mode. */
6946 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6947 {
6948 if (can_change_dest_mode (dest, 0, compare_mode))
6949 {
6950 unsigned int regno = REGNO (dest);
6951 rtx new_dest;
6952
6953 if (regno < FIRST_PSEUDO_REGISTER)
6954 new_dest = gen_rtx_REG (compare_mode, regno);
6955 else
6956 {
6957 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6958 new_dest = regno_reg_rtx[regno];
6959 }
6960
6961 SUBST (SET_DEST (x), new_dest);
6962 SUBST (XEXP (*cc_use, 0), new_dest);
6963 other_changed = 1;
6964
6965 dest = new_dest;
6966 }
6967 }
6968 #endif /* SELECT_CC_MODE */
6969
6970 /* If the code changed, we have to build a new comparison in
6971 undobuf.other_insn. */
6972 if (new_code != old_code)
6973 {
6974 int other_changed_previously = other_changed;
6975 unsigned HOST_WIDE_INT mask;
6976 rtx old_cc_use = *cc_use;
6977
6978 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6979 dest, const0_rtx));
6980 other_changed = 1;
6981
6982 /* If the only change we made was to change an EQ into an NE or
6983 vice versa, OP0 has only one bit that might be nonzero, and OP1
6984 is zero, check if changing the user of the condition code will
6985 produce a valid insn. If it won't, we can keep the original code
6986 in that insn by surrounding our operation with an XOR. */
6987
6988 if (((old_code == NE && new_code == EQ)
6989 || (old_code == EQ && new_code == NE))
6990 && ! other_changed_previously && op1 == const0_rtx
6991 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6992 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6993 {
6994 rtx pat = PATTERN (other_insn), note = 0;
6995
6996 if ((recog_for_combine (&pat, other_insn, &note) < 0
6997 && ! check_asm_operands (pat)))
6998 {
6999 *cc_use = old_cc_use;
7000 other_changed = 0;
7001
7002 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
7003 gen_int_mode (mask,
7004 GET_MODE (op0)));
7005 }
7006 }
7007 }
7008
7009 if (other_changed)
7010 undobuf.other_insn = other_insn;
7011
7012 /* Don't generate a compare of a CC with 0, just use that CC. */
7013 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
7014 {
7015 SUBST (SET_SRC (x), op0);
7016 src = SET_SRC (x);
7017 }
7018 /* Otherwise, if we didn't previously have the same COMPARE we
7019 want, create it from scratch. */
7020 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
7021 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
7022 {
7023 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
7024 src = SET_SRC (x);
7025 }
7026 }
7027 else
7028 {
7029 /* Get SET_SRC in a form where we have placed back any
7030 compound expressions. Then do the checks below. */
7031 src = make_compound_operation (src, SET);
7032 SUBST (SET_SRC (x), src);
7033 }
7034
7035 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
7036 and X being a REG or (subreg (reg)), we may be able to convert this to
7037 (set (subreg:m2 x) (op)).
7038
7039 We can always do this if M1 is narrower than M2 because that means that
7040 we only care about the low bits of the result.
7041
7042 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
7043 perform a narrower operation than requested since the high-order bits will
7044 be undefined. On machines where it is defined, this transformation is safe
7045 as long as M1 and M2 have the same number of words. */
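/* An illustrative sketch (placeholder pseudos X, A and B):
     (set (reg:QI X) (subreg:QI (plus:SI A B) 0))
   can become
     (set (subreg:SI (reg:QI X) 0) (plus:SI A B))
   because only the low QImode bits of the PLUS are needed. */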
7046
7047 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
7048 && !OBJECT_P (SUBREG_REG (src))
7049 && (known_equal_after_align_up
7050 (GET_MODE_SIZE (GET_MODE (src)),
7051 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
7052 UNITS_PER_WORD))
7053 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
7054 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
7055 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
7056 GET_MODE (SUBREG_REG (src)),
7057 GET_MODE (src)))
7058 && (REG_P (dest)
7059 || (GET_CODE (dest) == SUBREG
7060 && REG_P (SUBREG_REG (dest)))))
7061 {
7062 SUBST (SET_DEST (x),
7063 gen_lowpart (GET_MODE (SUBREG_REG (src)),
7064 dest));
7065 SUBST (SET_SRC (x), SUBREG_REG (src));
7066
7067 src = SET_SRC (x), dest = SET_DEST (x);
7068 }
7069
7070 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
7071 in SRC. */
7072 if (dest == cc0_rtx
7073 && partial_subreg_p (src)
7074 && subreg_lowpart_p (src))
7075 {
7076 rtx inner = SUBREG_REG (src);
7077 machine_mode inner_mode = GET_MODE (inner);
7078
7079 /* Here we make sure that the sign bit is known to be clear. */
7080 if (val_signbit_known_clear_p (GET_MODE (src),
7081 nonzero_bits (inner, inner_mode)))
7082 {
7083 SUBST (SET_SRC (x), inner);
7084 src = SET_SRC (x);
7085 }
7086 }
7087
7088 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7089 would require a paradoxical subreg. Replace the subreg with a
7090 zero_extend to avoid the reload that would otherwise be required.
7091 Don't do this unless we have a scalar integer mode, otherwise the
7092 transformation is incorrect. */
7093
7094 enum rtx_code extend_op;
7095 if (paradoxical_subreg_p (src)
7096 && MEM_P (SUBREG_REG (src))
7097 && SCALAR_INT_MODE_P (GET_MODE (src))
7098 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7099 {
7100 SUBST (SET_SRC (x),
7101 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7102
7103 src = SET_SRC (x);
7104 }
7105
7106 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7107 are comparing an item known to be 0 or -1 against 0, use a logical
7108 operation instead. Check for one of the arms being an IOR of the other
7109 arm with some value. We compute three terms to be IOR'ed together. In
7110 practice, at most two will be nonzero. Then we do the IOR's. */
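/* An illustrative sketch (placeholder operands A, B and C): when A is
   known to be 0 or -1,
     (if_then_else (ne A (const_int 0)) B C)
   can be computed as
     (ior (and A B) (and (not A) C))
   which is what the term1/term2/term3 computation below builds. */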
7111
7112 if (GET_CODE (dest) != PC
7113 && GET_CODE (src) == IF_THEN_ELSE
7114 && is_int_mode (GET_MODE (src), &int_mode)
7115 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7116 && XEXP (XEXP (src, 0), 1) == const0_rtx
7117 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7118 && (!HAVE_conditional_move
7119 || ! can_conditionally_move_p (int_mode))
7120 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7121 == GET_MODE_PRECISION (int_mode))
7122 && ! side_effects_p (src))
7123 {
7124 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7125 ? XEXP (src, 1) : XEXP (src, 2));
7126 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7127 ? XEXP (src, 2) : XEXP (src, 1));
7128 rtx term1 = const0_rtx, term2, term3;
7129
7130 if (GET_CODE (true_rtx) == IOR
7131 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7132 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7133 else if (GET_CODE (true_rtx) == IOR
7134 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7135 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7136 else if (GET_CODE (false_rtx) == IOR
7137 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7138 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7139 else if (GET_CODE (false_rtx) == IOR
7140 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7141 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7142
7143 term2 = simplify_gen_binary (AND, int_mode,
7144 XEXP (XEXP (src, 0), 0), true_rtx);
7145 term3 = simplify_gen_binary (AND, int_mode,
7146 simplify_gen_unary (NOT, int_mode,
7147 XEXP (XEXP (src, 0), 0),
7148 int_mode),
7149 false_rtx);
7150
7151 SUBST (SET_SRC (x),
7152 simplify_gen_binary (IOR, int_mode,
7153 simplify_gen_binary (IOR, int_mode,
7154 term1, term2),
7155 term3));
7156
7157 src = SET_SRC (x);
7158 }
7159
7160 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7161 whole thing fail. */
7162 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7163 return src;
7164 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7165 return dest;
7166 else
7167 /* Convert this into a field assignment operation, if possible. */
7168 return make_field_assignment (x);
7169 }
7170 \f
7171 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7172 result. */
7173
7174 static rtx
7175 simplify_logical (rtx x)
7176 {
7177 rtx op0 = XEXP (x, 0);
7178 rtx op1 = XEXP (x, 1);
7179 scalar_int_mode mode;
7180
7181 switch (GET_CODE (x))
7182 {
7183 case AND:
7184 /* We can call simplify_and_const_int only if we don't lose
7185 any (sign) bits when converting INTVAL (op1) to
7186 "unsigned HOST_WIDE_INT". */
7187 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7188 && CONST_INT_P (op1)
7189 && (HWI_COMPUTABLE_MODE_P (mode)
7190 || INTVAL (op1) > 0))
7191 {
7192 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7193 if (GET_CODE (x) != AND)
7194 return x;
7195
7196 op0 = XEXP (x, 0);
7197 op1 = XEXP (x, 1);
7198 }
7199
7200 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7201 apply the distributive law and then the inverse distributive
7202 law to see if things simplify. */
7203 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7204 {
7205 rtx result = distribute_and_simplify_rtx (x, 0);
7206 if (result)
7207 return result;
7208 }
7209 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7210 {
7211 rtx result = distribute_and_simplify_rtx (x, 1);
7212 if (result)
7213 return result;
7214 }
7215 break;
7216
7217 case IOR:
7218 /* If we have (ior (and A B) C), apply the distributive law and then
7219 the inverse distributive law to see if things simplify. */
7220
7221 if (GET_CODE (op0) == AND)
7222 {
7223 rtx result = distribute_and_simplify_rtx (x, 0);
7224 if (result)
7225 return result;
7226 }
7227
7228 if (GET_CODE (op1) == AND)
7229 {
7230 rtx result = distribute_and_simplify_rtx (x, 1);
7231 if (result)
7232 return result;
7233 }
7234 break;
7235
7236 default:
7237 gcc_unreachable ();
7238 }
7239
7240 return x;
7241 }
7242 \f
7243 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7244 operations" because they can be replaced with two more basic operations.
7245 ZERO_EXTEND is also considered "compound" because it can be replaced with
7246 an AND operation, which is simpler, though only one operation.
7247
7248 The function expand_compound_operation is called with an rtx expression
7249 and will convert it to the appropriate shifts and AND operations,
7250 simplifying at each stage.
7251
7252 The function make_compound_operation is called to convert an expression
7253 consisting of shifts and ANDs into the equivalent compound expression.
7254 It is the inverse of this function, loosely speaking. */
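/* Illustrative sketches in SImode with a QImode operand R (placeholders):
   (sign_extend:SI R) expands to roughly
     (ashiftrt:SI (ashift:SI (subreg:SI R 0) (const_int 24)) (const_int 24))
   while (zero_extend:SI R) ends up, after simplification, as roughly
     (and:SI (subreg:SI R 0) (const_int 255)). */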
7255
7256 static rtx
7257 expand_compound_operation (rtx x)
7258 {
7259 unsigned HOST_WIDE_INT pos = 0, len;
7260 int unsignedp = 0;
7261 unsigned int modewidth;
7262 rtx tem;
7263 scalar_int_mode inner_mode;
7264
7265 switch (GET_CODE (x))
7266 {
7267 case ZERO_EXTEND:
7268 unsignedp = 1;
7269 /* FALLTHRU */
7270 case SIGN_EXTEND:
7271 /* We can't necessarily use a const_int for a multiword mode;
7272 it depends on implicitly extending the value.
7273 Since we don't know the right way to extend it,
7274 we can't tell whether the implicit way is right.
7275
7276 Even for a mode that is no wider than a const_int,
7277 we can't win, because we need to sign extend one of its bits through
7278 the rest of it, and we don't know which bit. */
7279 if (CONST_INT_P (XEXP (x, 0)))
7280 return x;
7281
7282 /* Reject modes that aren't scalar integers because turning vector
7283 or complex modes into shifts causes problems. */
7284 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7285 return x;
7286
7287 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7288 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7289 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7290 reloaded. If not for that, MEM's would very rarely be safe.
7291
7292 Reject modes bigger than a word, because we might not be able
7293 to reference a two-register group starting with an arbitrary register
7294 (and currently gen_lowpart might crash for a SUBREG). */
7295
7296 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7297 return x;
7298
7299 len = GET_MODE_PRECISION (inner_mode);
7300 /* If the inner object has VOIDmode (the only way this can happen
7301 is if it is an ASM_OPERANDS), we can't do anything since we don't
7302 know how much masking to do. */
7303 if (len == 0)
7304 return x;
7305
7306 break;
7307
7308 case ZERO_EXTRACT:
7309 unsignedp = 1;
7310
7311 /* fall through */
7312
7313 case SIGN_EXTRACT:
7314 /* If the operand is a CLOBBER, just return it. */
7315 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7316 return XEXP (x, 0);
7317
7318 if (!CONST_INT_P (XEXP (x, 1))
7319 || !CONST_INT_P (XEXP (x, 2)))
7320 return x;
7321
7322 /* Reject modes that aren't scalar integers because turning vector
7323 or complex modes into shifts causes problems. */
7324 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7325 return x;
7326
7327 len = INTVAL (XEXP (x, 1));
7328 pos = INTVAL (XEXP (x, 2));
7329
7330 /* This should stay within the object being extracted, fail otherwise. */
7331 if (len + pos > GET_MODE_PRECISION (inner_mode))
7332 return x;
7333
7334 if (BITS_BIG_ENDIAN)
7335 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7336
7337 break;
7338
7339 default:
7340 return x;
7341 }
7342
7343 /* We've rejected non-scalar operations by now. */
7344 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7345
7346 /* Convert sign extension to zero extension, if we know that the high
7347 bit is not set, as this is easier to optimize. It will be converted
7348 back to a cheaper alternative in make_extraction. */
7349 if (GET_CODE (x) == SIGN_EXTEND
7350 && HWI_COMPUTABLE_MODE_P (mode)
7351 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7352 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7353 == 0))
7354 {
7355 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7356 rtx temp2 = expand_compound_operation (temp);
7357
7358 /* Make sure this is a profitable operation. */
7359 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7360 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7361 return temp2;
7362 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7363 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7364 return temp;
7365 else
7366 return x;
7367 }
7368
7369 /* We can optimize some special cases of ZERO_EXTEND. */
7370 if (GET_CODE (x) == ZERO_EXTEND)
7371 {
7372 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7373 know that the last value didn't have any inappropriate bits
7374 set. */
7375 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7376 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7377 && HWI_COMPUTABLE_MODE_P (mode)
7378 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7379 & ~GET_MODE_MASK (inner_mode)) == 0)
7380 return XEXP (XEXP (x, 0), 0);
7381
7382 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7383 if (GET_CODE (XEXP (x, 0)) == SUBREG
7384 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7385 && subreg_lowpart_p (XEXP (x, 0))
7386 && HWI_COMPUTABLE_MODE_P (mode)
7387 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7388 & ~GET_MODE_MASK (inner_mode)) == 0)
7389 return SUBREG_REG (XEXP (x, 0));
7390
7391 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7392 is a comparison and STORE_FLAG_VALUE permits. This is like
7393 the first case, but it works even when MODE is larger
7394 than HOST_WIDE_INT. */
7395 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7396 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7397 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7398 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7399 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7400 return XEXP (XEXP (x, 0), 0);
7401
7402 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7403 if (GET_CODE (XEXP (x, 0)) == SUBREG
7404 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7405 && subreg_lowpart_p (XEXP (x, 0))
7406 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7407 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7408 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7409 return SUBREG_REG (XEXP (x, 0));
7410
7411 }
7412
7413 /* If we reach here, we want to return a pair of shifts. The inner
7414 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7415 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7416 logical depending on the value of UNSIGNEDP.
7417
7418 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7419 converted into an AND of a shift.
7420
7421 We must check for the case where the left shift would have a negative
7422 count. This can happen in a case like (x >> 31) & 255 on machines
7423 that can't shift by a constant. On those machines, we would first
7424 combine the shift with the AND to produce a variable-position
7425 extraction. Then the constant of 31 would be substituted in
7426 to produce such a position. */
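/* An illustrative example in SImode (assuming !BITS_BIG_ENDIAN):
   (sign_extract:SI X (const_int 8) (const_int 4)) has LEN 8 and POS 4,
   so it becomes
     (ashiftrt:SI (ashift:SI X (const_int 20)) (const_int 24))
   i.e. a left shift of 32 - 4 - 8 bits followed by an arithmetic right
   shift of 32 - 8 bits. (X is a placeholder.) */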
7427
7428 modewidth = GET_MODE_PRECISION (mode);
7429 if (modewidth >= pos + len)
7430 {
7431 tem = gen_lowpart (mode, XEXP (x, 0));
7432 if (!tem || GET_CODE (tem) == CLOBBER)
7433 return x;
7434 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7435 tem, modewidth - pos - len);
7436 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7437 mode, tem, modewidth - len);
7438 }
7439 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7440 tem = simplify_and_const_int (NULL_RTX, mode,
7441 simplify_shift_const (NULL_RTX, LSHIFTRT,
7442 mode, XEXP (x, 0),
7443 pos),
7444 (HOST_WIDE_INT_1U << len) - 1);
7445 else
7446 /* Any other cases we can't handle. */
7447 return x;
7448
7449 /* If we couldn't do this for some reason, return the original
7450 expression. */
7451 if (GET_CODE (tem) == CLOBBER)
7452 return x;
7453
7454 return tem;
7455 }
7456 \f
7457 /* X is a SET which contains an assignment of one object into
7458 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7459 or certain SUBREGS). If possible, convert it into a series of
7460 logical operations.
7461
7462 We half-heartedly support variable positions, but do not at all
7463 support variable lengths. */
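/* An illustrative sketch (placeholder SImode register R and source SRC,
   assuming !BITS_BIG_ENDIAN):
     (set (zero_extract:SI R (const_int 8) (const_int 8)) SRC)
   is expanded below into roughly
     R = (R & ~(255 << 8)) | ((SRC & 255) << 8)
   expressed with AND, NOT, ASHIFT and IOR rtxes. */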
7464
7465 static const_rtx
7466 expand_field_assignment (const_rtx x)
7467 {
7468 rtx inner;
7469 rtx pos; /* Always counts from low bit. */
7470 int len, inner_len;
7471 rtx mask, cleared, masked;
7472 scalar_int_mode compute_mode;
7473
7474 /* Loop until we find something we can't simplify. */
7475 while (1)
7476 {
7477 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7478 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7479 {
7480 rtx x0 = XEXP (SET_DEST (x), 0);
7481 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7482 break;
7483 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7484 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7485 MAX_MODE_INT);
7486 }
7487 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7488 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7489 {
7490 inner = XEXP (SET_DEST (x), 0);
7491 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7492 break;
7493
7494 len = INTVAL (XEXP (SET_DEST (x), 1));
7495 pos = XEXP (SET_DEST (x), 2);
7496
7497 /* A constant position should stay within the width of INNER. */
7498 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7499 break;
7500
7501 if (BITS_BIG_ENDIAN)
7502 {
7503 if (CONST_INT_P (pos))
7504 pos = GEN_INT (inner_len - len - INTVAL (pos));
7505 else if (GET_CODE (pos) == MINUS
7506 && CONST_INT_P (XEXP (pos, 1))
7507 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7508 /* If position is ADJUST - X, new position is X. */
7509 pos = XEXP (pos, 0);
7510 else
7511 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7512 gen_int_mode (inner_len - len,
7513 GET_MODE (pos)),
7514 pos);
7515 }
7516 }
7517
7518 /* If the destination is a subreg that overwrites the whole of the inner
7519 register, we can move the subreg to the source. */
7520 else if (GET_CODE (SET_DEST (x)) == SUBREG
7521 /* We need SUBREGs to compute nonzero_bits properly. */
7522 && nonzero_sign_valid
7523 && !read_modify_subreg_p (SET_DEST (x)))
7524 {
7525 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7526 gen_lowpart
7527 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7528 SET_SRC (x)));
7529 continue;
7530 }
7531 else
7532 break;
7533
7534 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7535 inner = SUBREG_REG (inner);
7536
7537 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7538 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7539 {
7540 /* Don't do anything for vector or complex integral types. */
7541 if (! FLOAT_MODE_P (GET_MODE (inner)))
7542 break;
7543
7544 /* Try to find an integral mode to pun with. */
7545 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7546 .exists (&compute_mode))
7547 break;
7548
7549 inner = gen_lowpart (compute_mode, inner);
7550 }
7551
7552 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7553 if (len >= HOST_BITS_PER_WIDE_INT)
7554 break;
7555
7556 /* Don't try to compute in too wide unsupported modes. */
7557 if (!targetm.scalar_mode_supported_p (compute_mode))
7558 break;
7559
7560 /* Now compute the equivalent expression. Make a copy of INNER
7561 for the SET_DEST in case it is a MEM into which we will substitute;
7562 we don't want shared RTL in that case. */
7563 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7564 compute_mode);
7565 cleared = simplify_gen_binary (AND, compute_mode,
7566 simplify_gen_unary (NOT, compute_mode,
7567 simplify_gen_binary (ASHIFT,
7568 compute_mode,
7569 mask, pos),
7570 compute_mode),
7571 inner);
7572 masked = simplify_gen_binary (ASHIFT, compute_mode,
7573 simplify_gen_binary (
7574 AND, compute_mode,
7575 gen_lowpart (compute_mode, SET_SRC (x)),
7576 mask),
7577 pos);
7578
7579 x = gen_rtx_SET (copy_rtx (inner),
7580 simplify_gen_binary (IOR, compute_mode,
7581 cleared, masked));
7582 }
7583
7584 return x;
7585 }
7586 \f
7587 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7588 it is an RTX that represents the (variable) starting position; otherwise,
7589 POS is the (constant) starting bit position. Both are counted from the LSB.
7590
7591 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7592
7593 IN_DEST is nonzero if this is a reference in the destination of a SET.
7594 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7595 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7596 be used.
7597
7598 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7599 ZERO_EXTRACT should be built even for bits starting at bit 0.
7600
7601 MODE is the desired mode of the result (if IN_DEST == 0).
7602
7603 The result is an RTX for the extraction or NULL_RTX if the target
7604 can't handle it. */
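/* An illustrative call (hypothetical arguments, not from a real target):
   make_extraction (SImode, X, 8, NULL_RTX, 8, 1, 0, 0) asks for the 8 bits
   of X starting at bit 8, zero-extended; for a plain REG this typically
   comes back as (zero_extract:SI X (const_int 8) (const_int 8)), though
   the exact form depends on the target's extv/extzv/insv patterns. */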
7605
7606 static rtx
7607 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7608 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7609 int in_dest, int in_compare)
7610 {
7611 /* This mode describes the size of the storage area
7612 to fetch the overall value from. Within that, we
7613 ignore the POS lowest bits, etc. */
7614 machine_mode is_mode = GET_MODE (inner);
7615 machine_mode inner_mode;
7616 scalar_int_mode wanted_inner_mode;
7617 scalar_int_mode wanted_inner_reg_mode = word_mode;
7618 scalar_int_mode pos_mode = word_mode;
7619 machine_mode extraction_mode = word_mode;
7620 rtx new_rtx = 0;
7621 rtx orig_pos_rtx = pos_rtx;
7622 HOST_WIDE_INT orig_pos;
7623
7624 if (pos_rtx && CONST_INT_P (pos_rtx))
7625 pos = INTVAL (pos_rtx), pos_rtx = 0;
7626
7627 if (GET_CODE (inner) == SUBREG
7628 && subreg_lowpart_p (inner)
7629 && (paradoxical_subreg_p (inner)
7630 /* If trying or potentially trying to extract
7631 bits outside of is_mode, don't look through
7632 non-paradoxical SUBREGs. See PR82192. */
7633 || (pos_rtx == NULL_RTX
7634 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7635 {
7636 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7637 consider just the QI as the memory to extract from.
7638 The subreg adds or removes high bits; its mode is
7639 irrelevant to the meaning of this extraction,
7640 since POS and LEN count from the lsb. */
7641 if (MEM_P (SUBREG_REG (inner)))
7642 is_mode = GET_MODE (SUBREG_REG (inner));
7643 inner = SUBREG_REG (inner);
7644 }
7645 else if (GET_CODE (inner) == ASHIFT
7646 && CONST_INT_P (XEXP (inner, 1))
7647 && pos_rtx == 0 && pos == 0
7648 && len > UINTVAL (XEXP (inner, 1)))
7649 {
7650 /* We're extracting the least significant bits of an rtx
7651 (ashift X (const_int C)), where LEN > C. Extract the
7652 least significant (LEN - C) bits of X, giving an rtx
7653 whose mode is MODE, then shift it left C times. */
7654 new_rtx = make_extraction (mode, XEXP (inner, 0),
7655 0, 0, len - INTVAL (XEXP (inner, 1)),
7656 unsignedp, in_dest, in_compare);
7657 if (new_rtx != 0)
7658 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7659 }
7660 else if (GET_CODE (inner) == TRUNCATE
7661 /* If trying or potentially trying to extract
7662 bits outside of is_mode, don't look through
7663 TRUNCATE. See PR82192. */
7664 && pos_rtx == NULL_RTX
7665 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7666 inner = XEXP (inner, 0);
7667
7668 inner_mode = GET_MODE (inner);
7669
7670 /* See if this can be done without an extraction. We never can if the
7671 width of the field is not the same as that of some integer mode. For
7672 registers, we can only avoid the extraction if the position is at the
7673 low-order bit and this is either not in the destination or we have the
7674 appropriate STRICT_LOW_PART operation available.
7675
7676 For MEM, we can avoid an extract if the field starts on an appropriate
7677 boundary and we can change the mode of the memory reference. */
7678
7679 scalar_int_mode tmode;
7680 if (int_mode_for_size (len, 1).exists (&tmode)
7681 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7682 && !MEM_P (inner)
7683 && (pos == 0 || REG_P (inner))
7684 && (inner_mode == tmode
7685 || !REG_P (inner)
7686 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7687 || reg_truncated_to_mode (tmode, inner))
7688 && (! in_dest
7689 || (REG_P (inner)
7690 && have_insn_for (STRICT_LOW_PART, tmode))))
7691 || (MEM_P (inner) && pos_rtx == 0
7692 && (pos
7693 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7694 : BITS_PER_UNIT)) == 0
7695 /* We can't do this if we are widening INNER_MODE (it
7696 may not be aligned, for one thing). */
7697 && !paradoxical_subreg_p (tmode, inner_mode)
7698 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7699 && (inner_mode == tmode
7700 || (! mode_dependent_address_p (XEXP (inner, 0),
7701 MEM_ADDR_SPACE (inner))
7702 && ! MEM_VOLATILE_P (inner))))))
7703 {
7704 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7705 field. If the original and current mode are the same, we need not
7706 adjust the offset. Otherwise, we do if bytes big endian.
7707
7708 If INNER is not a MEM, get a piece consisting of just the field
7709 of interest (in this case POS % BITS_PER_WORD must be 0). */
7710
7711 if (MEM_P (inner))
7712 {
7713 poly_int64 offset;
7714
7715 /* POS counts from lsb, but make OFFSET count in memory order. */
7716 if (BYTES_BIG_ENDIAN)
7717 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7718 - len - pos);
7719 else
7720 offset = pos / BITS_PER_UNIT;
7721
7722 new_rtx = adjust_address_nv (inner, tmode, offset);
7723 }
7724 else if (REG_P (inner))
7725 {
7726 if (tmode != inner_mode)
7727 {
7728 /* We can't call gen_lowpart in a DEST since we
7729 always want a SUBREG (see below) and it would sometimes
7730 return a new hard register. */
7731 if (pos || in_dest)
7732 {
7733 poly_uint64 offset
7734 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7735
7736 /* Avoid creating invalid subregs, for example when
7737 simplifying (x>>32)&255. */
7738 if (!validate_subreg (tmode, inner_mode, inner, offset))
7739 return NULL_RTX;
7740
7741 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7742 }
7743 else
7744 new_rtx = gen_lowpart (tmode, inner);
7745 }
7746 else
7747 new_rtx = inner;
7748 }
7749 else
7750 new_rtx = force_to_mode (inner, tmode,
7751 len >= HOST_BITS_PER_WIDE_INT
7752 ? HOST_WIDE_INT_M1U
7753 : (HOST_WIDE_INT_1U << len) - 1, 0);
7754
7755 /* If this extraction is going into the destination of a SET,
7756 make a STRICT_LOW_PART unless we made a MEM. */
7757
7758 if (in_dest)
7759 return (MEM_P (new_rtx) ? new_rtx
7760 : (GET_CODE (new_rtx) != SUBREG
7761 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7762 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7763
7764 if (mode == tmode)
7765 return new_rtx;
7766
7767 if (CONST_SCALAR_INT_P (new_rtx))
7768 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7769 mode, new_rtx, tmode);
7770
7771 /* If we know that no extraneous bits are set, and that the high
7772 bit is not set, convert the extraction to the cheaper of
7773 sign and zero extension, which are equivalent in these cases. */
7774 if (flag_expensive_optimizations
7775 && (HWI_COMPUTABLE_MODE_P (tmode)
7776 && ((nonzero_bits (new_rtx, tmode)
7777 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7778 == 0)))
7779 {
7780 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7781 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7782
7783 /* Prefer ZERO_EXTENSION, since it gives more information to
7784 backends. */
7785 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7786 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7787 return temp;
7788 return temp1;
7789 }
7790
7791 /* Otherwise, sign- or zero-extend unless we already are in the
7792 proper mode. */
7793
7794 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7795 mode, new_rtx));
7796 }
7797
7798 /* Unless this is a COMPARE or we have a funny memory reference,
7799 don't do anything with zero-extending field extracts starting at
7800 the low-order bit since they are simple AND operations. */
7801 if (pos_rtx == 0 && pos == 0 && ! in_dest
7802 && ! in_compare && unsignedp)
7803 return 0;
7804
7805 /* If INNER is a MEM, reject this if we would be spanning bytes or
7806 if the position is not a constant and the length is not 1. In all
7807 other cases, we would only be going outside our object when
7808 an original shift would have been undefined. */
7809 if (MEM_P (inner)
7810 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7811 || (pos_rtx != 0 && len != 1)))
7812 return 0;
7813
7814 enum extraction_pattern pattern = (in_dest ? EP_insv
7815 : unsignedp ? EP_extzv : EP_extv);
7816
7817 /* If INNER is not from memory, we want it to have the mode of a register
7818 extraction pattern's structure operand, or word_mode if there is no
7819 such pattern. The same applies to extraction_mode and pos_mode
7820 and their respective operands.
7821
7822 For memory, assume that the desired extraction_mode and pos_mode
7823 are the same as for a register operation, since at present we don't
7824 have named patterns for aligned memory structures. */
7825 class extraction_insn insn;
7826 unsigned int inner_size;
7827 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7828 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7829 {
7830 wanted_inner_reg_mode = insn.struct_mode.require ();
7831 pos_mode = insn.pos_mode;
7832 extraction_mode = insn.field_mode;
7833 }
7834
7835 /* Never narrow an object, since that might not be safe. */
7836
7837 if (mode != VOIDmode
7838 && partial_subreg_p (extraction_mode, mode))
7839 extraction_mode = mode;
7840
7841 /* Punt if len is too large for extraction_mode. */
7842 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7843 return NULL_RTX;
7844
7845 if (!MEM_P (inner))
7846 wanted_inner_mode = wanted_inner_reg_mode;
7847 else
7848 {
7849 /* Be careful not to go beyond the extracted object and maintain the
7850 natural alignment of the memory. */
7851 wanted_inner_mode = smallest_int_mode_for_size (len);
7852 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7853 > GET_MODE_BITSIZE (wanted_inner_mode))
7854 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7855 }
7856
7857 orig_pos = pos;
7858
7859 if (BITS_BIG_ENDIAN)
7860 {
7861 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7862 BITS_BIG_ENDIAN style. If position is constant, compute new
7863 position. Otherwise, build subtraction.
7864 Note that POS is relative to the mode of the original argument.
7865 If it's a MEM we need to recompute POS relative to that.
7866 However, if we're extracting from (or inserting into) a register,
7867 we want to recompute POS relative to wanted_inner_mode. */
7868 int width;
7869 if (!MEM_P (inner))
7870 width = GET_MODE_BITSIZE (wanted_inner_mode);
7871 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7872 return NULL_RTX;
7873
7874 if (pos_rtx == 0)
7875 pos = width - len - pos;
7876 else
7877 pos_rtx
7878 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7879 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7880 pos_rtx);
7881 /* POS may be less than 0 now, but we check for that below.
7882 Note that it can only be less than 0 if !MEM_P (inner). */
7883 }
7884
7885 /* If INNER has a wider mode, and this is a constant extraction, try to
7886 make it smaller and adjust the byte to point to the byte containing
7887 the value. */
7888 if (wanted_inner_mode != VOIDmode
7889 && inner_mode != wanted_inner_mode
7890 && ! pos_rtx
7891 && partial_subreg_p (wanted_inner_mode, is_mode)
7892 && MEM_P (inner)
7893 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7894 && ! MEM_VOLATILE_P (inner))
7895 {
7896 poly_int64 offset = 0;
7897
7898 /* The computations below will be correct if the machine is big
7899 endian in both bits and bytes or little endian in bits and bytes.
7900 If it is mixed, we must adjust. */
7901
7902 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7903 adjust OFFSET to compensate. */
7904 if (BYTES_BIG_ENDIAN
7905 && paradoxical_subreg_p (is_mode, inner_mode))
7906 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7907
7908 /* We can now move to the desired byte. */
7909 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7910 * GET_MODE_SIZE (wanted_inner_mode);
7911 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7912
7913 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7914 && is_mode != wanted_inner_mode)
7915 offset = (GET_MODE_SIZE (is_mode)
7916 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7917
7918 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7919 }
7920
7921 /* If INNER is not memory, get it into the proper mode. If we are changing
7922 its mode, POS must be a constant and smaller than the size of the new
7923 mode. */
7924 else if (!MEM_P (inner))
7925 {
7926 /* On the LHS, don't create paradoxical subregs implicitly truncating
7927 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7928 if (in_dest
7929 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7930 wanted_inner_mode))
7931 return NULL_RTX;
7932
7933 if (GET_MODE (inner) != wanted_inner_mode
7934 && (pos_rtx != 0
7935 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7936 return NULL_RTX;
7937
7938 if (orig_pos < 0)
7939 return NULL_RTX;
7940
7941 inner = force_to_mode (inner, wanted_inner_mode,
7942 pos_rtx
7943 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7944 ? HOST_WIDE_INT_M1U
7945 : (((HOST_WIDE_INT_1U << len) - 1)
7946 << orig_pos),
7947 0);
7948 }
7949
7950 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7951 have to zero extend. Otherwise, we can just use a SUBREG.
7952
7953 We dealt with constant rtxes earlier, so pos_rtx cannot
7954 have VOIDmode at this point. */
7955 if (pos_rtx != 0
7956 && (GET_MODE_SIZE (pos_mode)
7957 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7958 {
7959 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7960 GET_MODE (pos_rtx));
7961
7962 /* If we know that no extraneous bits are set, and that the high
7963 bit is not set, convert the extraction to the cheaper one, either
7964 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7965 cases. */
7966 if (flag_expensive_optimizations
7967 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7968 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7969 & ~(((unsigned HOST_WIDE_INT)
7970 GET_MODE_MASK (GET_MODE (pos_rtx)))
7971 >> 1))
7972 == 0)))
7973 {
7974 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7975 GET_MODE (pos_rtx));
7976
7977 /* Prefer ZERO_EXTENSION, since it gives more information to
7978 backends. */
7979 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7980 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7981 temp = temp1;
7982 }
7983 pos_rtx = temp;
7984 }
7985
7986 /* Make POS_RTX unless we already have it and it is correct. If we don't
7987 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7988 be a CONST_INT. */
7989 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7990 pos_rtx = orig_pos_rtx;
7991
7992 else if (pos_rtx == 0)
7993 pos_rtx = GEN_INT (pos);
7994
7995 /* Make the required operation. See if we can use existing rtx. */
7996 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7997 extraction_mode, inner, GEN_INT (len), pos_rtx);
7998 if (! in_dest)
7999 new_rtx = gen_lowpart (mode, new_rtx);
8000
8001 return new_rtx;
8002 }
8003 \f
8004 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
8005 can be commuted with any other operations in X. Return X without
8006 that shift if so. */
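/* An illustrative example with COUNT == 2 (placeholder operand Y):
     (plus:SI (ashift:SI Y (const_int 2)) (const_int 12))
   becomes (plus:SI Y (const_int 3)), since the constant 12 has its low
   two bits clear and can be shifted right along with Y. */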
8007
8008 static rtx
8009 extract_left_shift (scalar_int_mode mode, rtx x, int count)
8010 {
8011 enum rtx_code code = GET_CODE (x);
8012 rtx tem;
8013
8014 switch (code)
8015 {
8016 case ASHIFT:
8017 /* This is the shift itself. If it is wide enough, we will return
8018 either the value being shifted if the shift count is equal to
8019 COUNT or a shift for the difference. */
8020 if (CONST_INT_P (XEXP (x, 1))
8021 && INTVAL (XEXP (x, 1)) >= count)
8022 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
8023 INTVAL (XEXP (x, 1)) - count);
8024 break;
8025
8026 case NEG: case NOT:
8027 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8028 return simplify_gen_unary (code, mode, tem, mode);
8029
8030 break;
8031
8032 case PLUS: case IOR: case XOR: case AND:
8033 /* If we can safely shift this constant and we find the inner shift,
8034 make a new operation. */
8035 if (CONST_INT_P (XEXP (x, 1))
8036 && (UINTVAL (XEXP (x, 1))
8037 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
8038 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8039 {
8040 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
8041 return simplify_gen_binary (code, mode, tem,
8042 gen_int_mode (val, mode));
8043 }
8044 break;
8045
8046 default:
8047 break;
8048 }
8049
8050 return 0;
8051 }
8052 \f
8053 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
8054 level of the expression and MODE is its mode. IN_CODE is as for
8055 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
8056 that should be used when recursing on operands of *X_PTR.
8057
8058 There are two possible actions:
8059
8060 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
8061 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
8062
8063 - Return a new rtx, which the caller returns directly. */
8064
8065 static rtx
8066 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
8067 enum rtx_code in_code,
8068 enum rtx_code *next_code_ptr)
8069 {
8070 rtx x = *x_ptr;
8071 enum rtx_code next_code = *next_code_ptr;
8072 enum rtx_code code = GET_CODE (x);
8073 int mode_width = GET_MODE_PRECISION (mode);
8074 rtx rhs, lhs;
8075 rtx new_rtx = 0;
8076 int i;
8077 rtx tem;
8078 scalar_int_mode inner_mode;
8079 bool equality_comparison = false;
8080
8081 if (in_code == EQ)
8082 {
8083 equality_comparison = true;
8084 in_code = COMPARE;
8085 }
8086
8087 /* Process depending on the code of this operation. If NEW_RTX is set
8088 to something nonzero, it will be returned. */
8089
8090 switch (code)
8091 {
8092 case ASHIFT:
8093 /* Convert shifts by constants into multiplications if inside
8094 an address. */
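/* For instance, inside an address (ashift X (const_int 2)) is rewritten
   as (mult X (const_int 4)), matching the canonical form of scaled index
   addresses. (Illustrative only; X is a placeholder.) */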
8095 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8096 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8097 && INTVAL (XEXP (x, 1)) >= 0)
8098 {
8099 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8100 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8101
8102 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8103 if (GET_CODE (new_rtx) == NEG)
8104 {
8105 new_rtx = XEXP (new_rtx, 0);
8106 multval = -multval;
8107 }
8108 multval = trunc_int_for_mode (multval, mode);
8109 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8110 }
8111 break;
8112
8113 case PLUS:
8114 lhs = XEXP (x, 0);
8115 rhs = XEXP (x, 1);
8116 lhs = make_compound_operation (lhs, next_code);
8117 rhs = make_compound_operation (rhs, next_code);
8118 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8119 {
8120 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8121 XEXP (lhs, 1));
8122 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8123 }
8124 else if (GET_CODE (lhs) == MULT
8125 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8126 {
8127 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8128 simplify_gen_unary (NEG, mode,
8129 XEXP (lhs, 1),
8130 mode));
8131 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8132 }
8133 else
8134 {
8135 SUBST (XEXP (x, 0), lhs);
8136 SUBST (XEXP (x, 1), rhs);
8137 }
8138 maybe_swap_commutative_operands (x);
8139 return x;
8140
8141 case MINUS:
8142 lhs = XEXP (x, 0);
8143 rhs = XEXP (x, 1);
8144 lhs = make_compound_operation (lhs, next_code);
8145 rhs = make_compound_operation (rhs, next_code);
8146 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8147 {
8148 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8149 XEXP (rhs, 1));
8150 return simplify_gen_binary (PLUS, mode, tem, lhs);
8151 }
8152 else if (GET_CODE (rhs) == MULT
8153 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8154 {
8155 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8156 simplify_gen_unary (NEG, mode,
8157 XEXP (rhs, 1),
8158 mode));
8159 return simplify_gen_binary (PLUS, mode, tem, lhs);
8160 }
8161 else
8162 {
8163 SUBST (XEXP (x, 0), lhs);
8164 SUBST (XEXP (x, 1), rhs);
8165 return x;
8166 }
8167
8168 case AND:
8169 /* If the second operand is not a constant, we can't do anything
8170 with it. */
8171 if (!CONST_INT_P (XEXP (x, 1)))
8172 break;
8173
8174 /* If the constant is a power of two minus one and the first operand
8175 is a logical right shift, make an extraction. */
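/* An illustrative sketch: (and (lshiftrt X (const_int 8)) (const_int 255))
   -- roughly "(x >> 8) & 0xff" at the source level -- can become an
   extraction of the 8 bits of X starting at bit 8, e.g.
   (zero_extract:SI X (const_int 8) (const_int 8)). (X is a placeholder.) */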
8176 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8177 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8178 {
8179 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8180 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8181 i, 1, 0, in_code == COMPARE);
8182 }
8183
8184 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8185 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8186 && subreg_lowpart_p (XEXP (x, 0))
8187 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8188 &inner_mode)
8189 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8190 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8191 {
8192 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8193 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8194 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8195 XEXP (inner_x0, 1),
8196 i, 1, 0, in_code == COMPARE);
8197
8198 /* If we narrowed the mode when dropping the subreg, then we lose. */
8199 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8200 new_rtx = NULL;
8201
8202 /* If that didn't give anything, see if the AND simplifies on
8203 its own. */
8204 if (!new_rtx && i >= 0)
8205 {
8206 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8207 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8208 0, in_code == COMPARE);
8209 }
8210 }
8211 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8212 else if ((GET_CODE (XEXP (x, 0)) == XOR
8213 || GET_CODE (XEXP (x, 0)) == IOR)
8214 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8215 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8216 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8217 {
8218 /* Apply the distributive law, and then try to make extractions. */
8219 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8220 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8221 XEXP (x, 1)),
8222 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8223 XEXP (x, 1)));
8224 new_rtx = make_compound_operation (new_rtx, in_code);
8225 }
8226
8227 /* If we have (and (rotate X C) M) and C is larger than the number
8228 of bits in M, this is an extraction. */
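/* For instance, in SImode
     (and:SI (rotate:SI X (const_int 28)) (const_int 15))
   selects bits 4..7 of X, so it is handled as an extraction of 4 bits
   starting at bit 32 - 28 == 4 of X. */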
8229
8230 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8231 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8232 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8233 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8234 {
8235 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8236 new_rtx = make_extraction (mode, new_rtx,
8237 (GET_MODE_PRECISION (mode)
8238 - INTVAL (XEXP (XEXP (x, 0), 1))),
8239 NULL_RTX, i, 1, 0, in_code == COMPARE);
8240 }
8241
8242 /* On machines without logical shifts, if the operand of the AND is
8243 a logical shift and our mask turns off all the propagated sign
8244 bits, we can replace the logical shift with an arithmetic shift. */
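/* For instance, in SImode
     (and:SI (lshiftrt:SI X (const_int 24)) (const_int 255))
   keeps only bits that are identical for a logical and an arithmetic
   shift, so the LSHIFTRT can be rewritten as an ASHIFTRT when only the
   latter is available. */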
8245 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8246 && !have_insn_for (LSHIFTRT, mode)
8247 && have_insn_for (ASHIFTRT, mode)
8248 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8249 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8250 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8251 && mode_width <= HOST_BITS_PER_WIDE_INT)
8252 {
8253 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8254
8255 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8256 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8257 SUBST (XEXP (x, 0),
8258 gen_rtx_ASHIFTRT (mode,
8259 make_compound_operation (XEXP (XEXP (x,
8260 0),
8261 0),
8262 next_code),
8263 XEXP (XEXP (x, 0), 1)));
8264 }
8265
8266 /* If the constant is one less than a power of two, this might be
8267 representable by an extraction even if no shift is present.
8268 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8269 we are in a COMPARE. */
8270 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8271 new_rtx = make_extraction (mode,
8272 make_compound_operation (XEXP (x, 0),
8273 next_code),
8274 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8275
8276 /* If we are in a comparison and this is an AND with a power of two,
8277 convert this into the appropriate bit extract. */
8278 else if (in_code == COMPARE
8279 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8280 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8281 new_rtx = make_extraction (mode,
8282 make_compound_operation (XEXP (x, 0),
8283 next_code),
8284 i, NULL_RTX, 1, 1, 0, 1);
8285
8286 /* If one operand is a paradoxical subreg of a register or memory and
8287 the constant (limited to the smaller mode) has only zero bits where
8288 the sub expression has known zero bits, this can be expressed as
8289 a zero_extend. */
8290 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8291 {
8292 rtx sub;
8293
8294 sub = XEXP (XEXP (x, 0), 0);
8295 machine_mode sub_mode = GET_MODE (sub);
8296 int sub_width;
8297 if ((REG_P (sub) || MEM_P (sub))
8298 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8299 && sub_width < mode_width)
8300 {
8301 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8302 unsigned HOST_WIDE_INT mask;
8303
8304 /* The original AND constant with all the known zero bits set. */
8305 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8306 if ((mask & mode_mask) == mode_mask)
8307 {
8308 new_rtx = make_compound_operation (sub, next_code);
8309 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8310 1, 0, in_code == COMPARE);
8311 }
8312 }
8313 }
8314
8315 break;
8316
8317 case LSHIFTRT:
8318 /* If the sign bit is known to be zero, replace this with an
8319 arithmetic shift. */
8320 if (have_insn_for (ASHIFTRT, mode)
8321 && ! have_insn_for (LSHIFTRT, mode)
8322 && mode_width <= HOST_BITS_PER_WIDE_INT
8323 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8324 {
8325 new_rtx = gen_rtx_ASHIFTRT (mode,
8326 make_compound_operation (XEXP (x, 0),
8327 next_code),
8328 XEXP (x, 1));
8329 break;
8330 }
8331
8332 /* fall through */
8333
8334 case ASHIFTRT:
8335 lhs = XEXP (x, 0);
8336 rhs = XEXP (x, 1);
8337
8338 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8339 this is a SIGN_EXTRACT. */
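/* For instance, in SImode
     (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
   sign-extends the low 8 bits of X, i.e. a signed extraction of
   32 - 24 == 8 bits starting at bit 24 - 24 == 0. */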
8340 if (CONST_INT_P (rhs)
8341 && GET_CODE (lhs) == ASHIFT
8342 && CONST_INT_P (XEXP (lhs, 1))
8343 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8344 && INTVAL (XEXP (lhs, 1)) >= 0
8345 && INTVAL (rhs) < mode_width)
8346 {
8347 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8348 new_rtx = make_extraction (mode, new_rtx,
8349 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8350 NULL_RTX, mode_width - INTVAL (rhs),
8351 code == LSHIFTRT, 0, in_code == COMPARE);
8352 break;
8353 }
8354
8355 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8356 If so, try to merge the shifts into a SIGN_EXTEND. We could
8357 also do this for some cases of SIGN_EXTRACT, but it doesn't
8358 seem worth the effort; the case checked for occurs on Alpha. */
8359
8360 if (!OBJECT_P (lhs)
8361 && ! (GET_CODE (lhs) == SUBREG
8362 && (OBJECT_P (SUBREG_REG (lhs))))
8363 && CONST_INT_P (rhs)
8364 && INTVAL (rhs) >= 0
8365 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8366 && INTVAL (rhs) < mode_width
8367 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8368 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8369 next_code),
8370 0, NULL_RTX, mode_width - INTVAL (rhs),
8371 code == LSHIFTRT, 0, in_code == COMPARE);
8372
8373 break;
8374
8375 case SUBREG:
8376 /* Call ourselves recursively on the inner expression. If we are
8377 narrowing the object and it has a different RTL code from
8378 what it originally did, do this SUBREG as a force_to_mode. */
8379 {
8380 rtx inner = SUBREG_REG (x), simplified;
8381 enum rtx_code subreg_code = in_code;
8382
8383 /* If the SUBREG is masking of a logical right shift,
8384 make an extraction. */
8385 if (GET_CODE (inner) == LSHIFTRT
8386 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8387 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8388 && CONST_INT_P (XEXP (inner, 1))
8389 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8390 && subreg_lowpart_p (x))
8391 {
8392 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8393 int width = GET_MODE_PRECISION (inner_mode)
8394 - INTVAL (XEXP (inner, 1));
8395 if (width > mode_width)
8396 width = mode_width;
8397 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8398 width, 1, 0, in_code == COMPARE);
8399 break;
8400 }
8401
8402 /* If in_code is COMPARE, it isn't always safe to pass it through
8403 to the recursive make_compound_operation call. */
8404 if (subreg_code == COMPARE
8405 && (!subreg_lowpart_p (x)
8406 || GET_CODE (inner) == SUBREG
8407 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8408 is (const_int 0), rather than
8409 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8410 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8411 for non-equality comparisons against 0 is not equivalent
8412 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8413 || (GET_CODE (inner) == AND
8414 && CONST_INT_P (XEXP (inner, 1))
8415 && partial_subreg_p (x)
8416 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8417 >= GET_MODE_BITSIZE (mode) - 1)))
8418 subreg_code = SET;
8419
8420 tem = make_compound_operation (inner, subreg_code);
8421
8422 simplified
8423 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8424 if (simplified)
8425 tem = simplified;
8426
8427 if (GET_CODE (tem) != GET_CODE (inner)
8428 && partial_subreg_p (x)
8429 && subreg_lowpart_p (x))
8430 {
8431 rtx newer
8432 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8433
8434 /* If we have something other than a SUBREG, we might have
8435 done an expansion, so rerun ourselves. */
8436 if (GET_CODE (newer) != SUBREG)
8437 newer = make_compound_operation (newer, in_code);
8438
8439 /* force_to_mode can expand compounds. If it just re-expanded
8440 the compound, use gen_lowpart to convert to the desired
8441 mode. */
8442 if (rtx_equal_p (newer, x)
8443 /* Likewise if it re-expanded the compound only partially.
8444 This happens for SUBREG of ZERO_EXTRACT if they extract
8445 the same number of bits. */
8446 || (GET_CODE (newer) == SUBREG
8447 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8448 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8449 && GET_CODE (inner) == AND
8450 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8451 return gen_lowpart (GET_MODE (x), tem);
8452
8453 return newer;
8454 }
8455
8456 if (simplified)
8457 return tem;
8458 }
8459 break;
8460
8461 default:
8462 break;
8463 }
8464
8465 if (new_rtx)
8466 *x_ptr = gen_lowpart (mode, new_rtx);
8467 *next_code_ptr = next_code;
8468 return NULL_RTX;
8469 }
8470
8471 /* Look at the expression rooted at X. Look for expressions
8472 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8473 Form these expressions.
8474
8475 Return the new rtx, usually just X.
8476
8477 Also, for machines like the VAX that don't have logical shift insns,
8478 try to convert logical to arithmetic shift operations in cases where
8479 they are equivalent. This undoes the canonicalizations to logical
8480 shifts done elsewhere.
8481
8482 We try, as much as possible, to re-use rtl expressions to save memory.
8483
8484 IN_CODE says what kind of expression we are processing. Normally, it is
8485 SET. In a memory address it is MEM. When processing the arguments of
8486 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8487 precisely it is an equality comparison against zero. */
8488
8489 rtx
8490 make_compound_operation (rtx x, enum rtx_code in_code)
8491 {
8492 enum rtx_code code = GET_CODE (x);
8493 const char *fmt;
8494 int i, j;
8495 enum rtx_code next_code;
8496 rtx new_rtx, tem;
8497
8498 /* Select the code to be used in recursive calls. Once we are inside an
8499 address, we stay there. If we have a comparison, set to COMPARE,
8500 but once inside, go back to our default of SET. */
8501
8502 next_code = (code == MEM ? MEM
8503 : ((code == COMPARE || COMPARISON_P (x))
8504 && XEXP (x, 1) == const0_rtx) ? COMPARE
8505 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8506
8507 scalar_int_mode mode;
8508 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8509 {
8510 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8511 &next_code);
8512 if (new_rtx)
8513 return new_rtx;
8514 code = GET_CODE (x);
8515 }
8516
8517 /* Now recursively process each operand of this operation. We need to
8518 handle ZERO_EXTEND specially so that we don't lose track of the
8519 inner mode. */
8520 if (code == ZERO_EXTEND)
8521 {
8522 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8523 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8524 new_rtx, GET_MODE (XEXP (x, 0)));
8525 if (tem)
8526 return tem;
8527 SUBST (XEXP (x, 0), new_rtx);
8528 return x;
8529 }
8530
8531 fmt = GET_RTX_FORMAT (code);
8532 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8533 if (fmt[i] == 'e')
8534 {
8535 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8536 SUBST (XEXP (x, i), new_rtx);
8537 }
8538 else if (fmt[i] == 'E')
8539 for (j = 0; j < XVECLEN (x, i); j++)
8540 {
8541 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8542 SUBST (XVECEXP (x, i, j), new_rtx);
8543 }
8544
8545 maybe_swap_commutative_operands (x);
8546 return x;
8547 }
8548 \f
8549 /* Given M see if it is a value that would select a field of bits
8550 within an item, but not the entire word. Return -1 if not.
8551 Otherwise, return the starting position of the field, where 0 is the
8552 low-order bit.
8553
8554 *PLEN is set to the length of the field. */
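/* For instance, M == 0x0ff0 selects bits 4..11, so the return value is 4
   and *PLEN is set to 8, while M == 0x0ff2 returns -1 because its one
   bits do not form a single contiguous field. */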
8555
8556 static int
8557 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8558 {
8559 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8560 int pos = m ? ctz_hwi (m) : -1;
8561 int len = 0;
8562
8563 if (pos >= 0)
8564 /* Now shift off the low-order zero bits and see if we have a
8565 power of two minus 1. */
8566 len = exact_log2 ((m >> pos) + 1);
8567
8568 if (len <= 0)
8569 pos = -1;
8570
8571 *plen = len;
8572 return pos;
8573 }
8574 \f
8575 /* If X refers to a register that equals REG in value, replace these
8576 references with REG. */
8577 static rtx
8578 canon_reg_for_combine (rtx x, rtx reg)
8579 {
8580 rtx op0, op1, op2;
8581 const char *fmt;
8582 int i;
8583 bool copied;
8584
8585 enum rtx_code code = GET_CODE (x);
8586 switch (GET_RTX_CLASS (code))
8587 {
8588 case RTX_UNARY:
8589 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8590 if (op0 != XEXP (x, 0))
8591 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8592 GET_MODE (reg));
8593 break;
8594
8595 case RTX_BIN_ARITH:
8596 case RTX_COMM_ARITH:
8597 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8598 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8599 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8600 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8601 break;
8602
8603 case RTX_COMPARE:
8604 case RTX_COMM_COMPARE:
8605 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8606 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8607 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8608 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8609 GET_MODE (op0), op0, op1);
8610 break;
8611
8612 case RTX_TERNARY:
8613 case RTX_BITFIELD_OPS:
8614 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8615 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8616 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8617 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8618 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8619 GET_MODE (op0), op0, op1, op2);
8620 /* FALLTHRU */
8621
8622 case RTX_OBJ:
8623 if (REG_P (x))
8624 {
8625 if (rtx_equal_p (get_last_value (reg), x)
8626 || rtx_equal_p (reg, get_last_value (x)))
8627 return reg;
8628 else
8629 break;
8630 }
8631
8632 /* fall through */
8633
8634 default:
8635 fmt = GET_RTX_FORMAT (code);
8636 copied = false;
8637 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8638 if (fmt[i] == 'e')
8639 {
8640 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8641 if (op != XEXP (x, i))
8642 {
8643 if (!copied)
8644 {
8645 copied = true;
8646 x = copy_rtx (x);
8647 }
8648 XEXP (x, i) = op;
8649 }
8650 }
8651 else if (fmt[i] == 'E')
8652 {
8653 int j;
8654 for (j = 0; j < XVECLEN (x, i); j++)
8655 {
8656 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8657 if (op != XVECEXP (x, i, j))
8658 {
8659 if (!copied)
8660 {
8661 copied = true;
8662 x = copy_rtx (x);
8663 }
8664 XVECEXP (x, i, j) = op;
8665 }
8666 }
8667 }
8668
8669 break;
8670 }
8671
8672 return x;
8673 }
8674
8675 /* Return X converted to MODE. If the value is already truncated to
8676 MODE we can just return a subreg even though in the general case we
8677 would need an explicit truncation. */
8678
8679 static rtx
8680 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8681 {
8682 if (!CONST_INT_P (x)
8683 && partial_subreg_p (mode, GET_MODE (x))
8684 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8685 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8686 {
8687 /* Bit-cast X into an integer mode. */
8688 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8689 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8690 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8691 x, GET_MODE (x));
8692 }
8693
8694 return gen_lowpart (mode, x);
8695 }
8696
8697 /* See if X can be simplified knowing that we will only refer to it in
8698 MODE and will only refer to those bits that are nonzero in MASK.
8699 If other bits are being computed or if masking operations are done
8700 that select a superset of the bits in MASK, they can sometimes be
8701 ignored.
8702
8703 Return a possibly simplified expression, but always convert X to
8704 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8705
8706 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8707 are all off in X. This is used when X will be complemented, by either
8708 NOT, NEG, or XOR. */
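/* For instance, forcing (const_int 0x1234) to QImode under a MASK of 0xff
   simply yields (const_int 0x34), since only the masked low bits can
   matter to any user of the result. */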
8709
8710 static rtx
8711 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8712 int just_select)
8713 {
8714 enum rtx_code code = GET_CODE (x);
8715 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8716 machine_mode op_mode;
8717 unsigned HOST_WIDE_INT nonzero;
8718
8719 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8720 code below will do the wrong thing since the mode of such an
8721 expression is VOIDmode.
8722
8723 Also do nothing if X is a CLOBBER; this can happen if X was
8724 the return value from a call to gen_lowpart. */
8725 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8726 return x;
8727
8728 /* We want to perform the operation in its present mode unless we know
8729 that the operation is valid in MODE, in which case we do the operation
8730 in MODE. */
8731 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8732 && have_insn_for (code, mode))
8733 ? mode : GET_MODE (x));
8734
8735 /* It is not valid to do a right-shift in a narrower mode
8736 than the one it came in with. */
8737 if ((code == LSHIFTRT || code == ASHIFTRT)
8738 && partial_subreg_p (mode, GET_MODE (x)))
8739 op_mode = GET_MODE (x);
8740
8741 /* Truncate MASK to fit OP_MODE. */
8742 if (op_mode)
8743 mask &= GET_MODE_MASK (op_mode);
8744
8745 /* Determine what bits of X are guaranteed to be (non)zero. */
8746 nonzero = nonzero_bits (x, mode);
8747
8748 /* If none of the bits in X are needed, return a zero. */
8749 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8750 x = const0_rtx;
8751
8752 /* If X is a CONST_INT, return a new one. Do this here since the
8753 test below will fail. */
8754 if (CONST_INT_P (x))
8755 {
8756 if (SCALAR_INT_MODE_P (mode))
8757 return gen_int_mode (INTVAL (x) & mask, mode);
8758 else
8759 {
8760 x = GEN_INT (INTVAL (x) & mask);
8761 return gen_lowpart_common (mode, x);
8762 }
8763 }
8764
8765 /* If X is narrower than MODE and we want all the bits in X's mode, just
8766 get X in the proper mode. */
8767 if (paradoxical_subreg_p (mode, GET_MODE (x))
8768 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8769 return gen_lowpart (mode, x);
8770
8771 /* We can ignore the effect of a SUBREG if it narrows the mode or
8772 if the constant masks to zero all the bits the mode doesn't have. */
8773 if (GET_CODE (x) == SUBREG
8774 && subreg_lowpart_p (x)
8775 && (partial_subreg_p (x)
8776 || (mask
8777 & GET_MODE_MASK (GET_MODE (x))
8778 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8779 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8780
8781 scalar_int_mode int_mode, xmode;
8782 if (is_a <scalar_int_mode> (mode, &int_mode)
8783 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8784 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8785 integer too. */
8786 return force_int_to_mode (x, int_mode, xmode,
8787 as_a <scalar_int_mode> (op_mode),
8788 mask, just_select);
8789
8790 return gen_lowpart_or_truncate (mode, x);
8791 }
8792
8793 /* Subroutine of force_to_mode that handles cases in which both X and
8794 the result are scalar integers. MODE is the mode of the result,
8795 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8796 is preferred for simplified versions of X. The other arguments
8797 are as for force_to_mode. */
8798
8799 static rtx
8800 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8801 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8802 int just_select)
8803 {
8804 enum rtx_code code = GET_CODE (x);
8805 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8806 unsigned HOST_WIDE_INT fuller_mask;
8807 rtx op0, op1, temp;
8808 poly_int64 const_op0;
8809
8810 /* When we have an arithmetic operation, or a shift whose count we
8811 do not know, we need to assume that all bits up to the highest-order
8812 bit in MASK will be needed. This is how we form such a mask. */
8813 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8814 fuller_mask = HOST_WIDE_INT_M1U;
8815 else
8816 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8817 - 1);
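/* For instance, if MASK is 0x14 its highest set bit is bit 4, so
   FULLER_MASK becomes 0x1f: any bit below bit 4 can produce a carry into
   the bits we actually need. */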
8818
8819 switch (code)
8820 {
8821 case CLOBBER:
8822 /* If X is a (clobber (const_int)), return it since we know we are
8823 generating something that won't match. */
8824 return x;
8825
8826 case SIGN_EXTEND:
8827 case ZERO_EXTEND:
8828 case ZERO_EXTRACT:
8829 case SIGN_EXTRACT:
8830 x = expand_compound_operation (x);
8831 if (GET_CODE (x) != code)
8832 return force_to_mode (x, mode, mask, next_select);
8833 break;
8834
8835 case TRUNCATE:
8836 /* Similarly for a truncate. */
8837 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8838
8839 case AND:
8840 /* If this is an AND with a constant, convert it into an AND
8841 whose constant is the AND of that constant with MASK. If it
8842 remains an AND of MASK, delete it since it is redundant. */
8843
8844 if (CONST_INT_P (XEXP (x, 1)))
8845 {
8846 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8847 mask & INTVAL (XEXP (x, 1)));
8848 xmode = op_mode;
8849
8850 /* If X is still an AND, see if it is an AND with a mask that
8851 is just some low-order bits. If so, and it is MASK, we don't
8852 need it. */
8853
8854 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8855 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8856 x = XEXP (x, 0);
8857
8858 /* If it remains an AND, try making another AND with the bits
8859 in the mode mask that aren't in MASK turned on. If the
8860 constant in the AND is wide enough, this might make a
8861 cheaper constant. */
8862
8863 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8864 && GET_MODE_MASK (xmode) != mask
8865 && HWI_COMPUTABLE_MODE_P (xmode))
8866 {
8867 unsigned HOST_WIDE_INT cval
8868 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8869 rtx y;
8870
8871 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8872 gen_int_mode (cval, xmode));
8873 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8874 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8875 x = y;
8876 }
8877
8878 break;
8879 }
8880
8881 goto binop;
8882
8883 case PLUS:
8884 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8885 low-order bits (as in an alignment operation) and FOO is already
8886 aligned to that boundary, mask C1 to that boundary as well.
8887 This may eliminate that PLUS and, later, the AND. */
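/* For instance, if M is -8 (an 8-byte alignment mask), FOO is known to
   have its low three bits clear and C1 is 11, then C1 can be masked down
   to 8, because the low bits of the sum are discarded by M anyway. */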
8888
8889 {
8890 unsigned int width = GET_MODE_PRECISION (mode);
8891 unsigned HOST_WIDE_INT smask = mask;
8892
8893 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8894 number, sign extend it. */
8895
8896 if (width < HOST_BITS_PER_WIDE_INT
8897 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8898 smask |= HOST_WIDE_INT_M1U << width;
8899
8900 if (CONST_INT_P (XEXP (x, 1))
8901 && pow2p_hwi (- smask)
8902 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8903 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8904 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8905 (INTVAL (XEXP (x, 1)) & smask)),
8906 mode, smask, next_select);
8907 }
8908
8909 /* fall through */
8910
8911 case MULT:
8912 /* Substituting into the operands of a widening MULT is not likely to
8913 create RTL matching a machine insn. */
8914 if (code == MULT
8915 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8916 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8917 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8918 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8919 && REG_P (XEXP (XEXP (x, 0), 0))
8920 && REG_P (XEXP (XEXP (x, 1), 0)))
8921 return gen_lowpart_or_truncate (mode, x);
8922
8923 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8924 most significant bit in MASK since carries from those bits will
8925 affect the bits we are interested in. */
8926 mask = fuller_mask;
8927 goto binop;
8928
8929 case MINUS:
8930 /* If X is (minus C Y) where C's least set bit is larger than any bit
8931 in the mask, then we may replace with (neg Y). */
8932 if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8933 && known_alignment (poly_uint64 (const_op0)) > mask)
8934 {
8935 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8936 return force_to_mode (x, mode, mask, next_select);
8937 }
8938
8939 /* Similarly, if C contains every bit in the fuller_mask, then we may
8940 replace with (not Y). */
8941 if (CONST_INT_P (XEXP (x, 0))
8942 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8943 {
8944 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8945 return force_to_mode (x, mode, mask, next_select);
8946 }
8947
8948 mask = fuller_mask;
8949 goto binop;
8950
8951 case IOR:
8952 case XOR:
8953 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8954 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8955 operation which may be a bitfield extraction. Ensure that the
8956 constant we form is not wider than the mode of X. */
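/* For instance, in SImode
     (ior:SI (lshiftrt:SI FOO (const_int 8)) (const_int 255))
   can become roughly
     (lshiftrt:SI (ior:SI FOO (const_int 0xff00)) (const_int 8)),
   provided the constant has no bits set where the shifted operand is
   known to be zero, as checked below. */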
8957
8958 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8959 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8960 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8961 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8962 && CONST_INT_P (XEXP (x, 1))
8963 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8964 + floor_log2 (INTVAL (XEXP (x, 1))))
8965 < GET_MODE_PRECISION (xmode))
8966 && (UINTVAL (XEXP (x, 1))
8967 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8968 {
8969 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8970 << INTVAL (XEXP (XEXP (x, 0), 1)),
8971 xmode);
8972 temp = simplify_gen_binary (GET_CODE (x), xmode,
8973 XEXP (XEXP (x, 0), 0), temp);
8974 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8975 XEXP (XEXP (x, 0), 1));
8976 return force_to_mode (x, mode, mask, next_select);
8977 }
8978
8979 binop:
8980 /* For most binary operations, just propagate into the operation and
8981 change the mode if we have an operation of that mode. */
8982
8983 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8984 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8985
8986 /* If we ended up truncating both operands, truncate the result of the
8987 operation instead. */
8988 if (GET_CODE (op0) == TRUNCATE
8989 && GET_CODE (op1) == TRUNCATE)
8990 {
8991 op0 = XEXP (op0, 0);
8992 op1 = XEXP (op1, 0);
8993 }
8994
8995 op0 = gen_lowpart_or_truncate (op_mode, op0);
8996 op1 = gen_lowpart_or_truncate (op_mode, op1);
8997
8998 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8999 {
9000 x = simplify_gen_binary (code, op_mode, op0, op1);
9001 xmode = op_mode;
9002 }
9003 break;
9004
9005 case ASHIFT:
9006 /* For left shifts, do the same, but just for the first operand.
9007 However, we cannot do anything with shifts where we cannot
9008 guarantee that the counts are smaller than the size of the mode
9009 because such a count will have a different meaning in a
9010 wider mode. */
9011
9012 if (! (CONST_INT_P (XEXP (x, 1))
9013 && INTVAL (XEXP (x, 1)) >= 0
9014 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
9015 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
9016 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
9017 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
9018 break;
9019
9020 /* If the shift count is a constant and we can do arithmetic in
9021 the mode of the shift, refine which bits we need. Otherwise, use the
9022 conservative form of the mask. */
9023 if (CONST_INT_P (XEXP (x, 1))
9024 && INTVAL (XEXP (x, 1)) >= 0
9025 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
9026 && HWI_COMPUTABLE_MODE_P (op_mode))
9027 mask >>= INTVAL (XEXP (x, 1));
9028 else
9029 mask = fuller_mask;
9030
9031 op0 = gen_lowpart_or_truncate (op_mode,
9032 force_to_mode (XEXP (x, 0), mode,
9033 mask, next_select));
9034
9035 if (op_mode != xmode || op0 != XEXP (x, 0))
9036 {
9037 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
9038 xmode = op_mode;
9039 }
9040 break;
9041
9042 case LSHIFTRT:
9043 /* Here we can only do something if the shift count is a constant,
9044 this shift constant is valid for the host, and we can do arithmetic
9045 in OP_MODE. */
9046
9047 if (CONST_INT_P (XEXP (x, 1))
9048 && INTVAL (XEXP (x, 1)) >= 0
9049 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
9050 && HWI_COMPUTABLE_MODE_P (op_mode))
9051 {
9052 rtx inner = XEXP (x, 0);
9053 unsigned HOST_WIDE_INT inner_mask;
9054
9055 /* Select the mask of the bits we need for the shift operand. */
9056 inner_mask = mask << INTVAL (XEXP (x, 1));
9057
9058 /* We can only change the mode of the shift if we can do arithmetic
9059 in the mode of the shift and INNER_MASK is no wider than the
9060 width of X's mode. */
9061 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
9062 op_mode = xmode;
9063
9064 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9065
9066 if (xmode != op_mode || inner != XEXP (x, 0))
9067 {
9068 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9069 xmode = op_mode;
9070 }
9071 }
9072
9073 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
9074 shift and AND produces only copies of the sign bit (C2 is one less
9075 than a power of two), we can do this with just a shift. */
9076
9077 if (GET_CODE (x) == LSHIFTRT
9078 && CONST_INT_P (XEXP (x, 1))
9079 /* The shift puts one of the sign bit copies in the least significant
9080 bit. */
9081 && ((INTVAL (XEXP (x, 1))
9082 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9083 >= GET_MODE_PRECISION (xmode))
9084 && pow2p_hwi (mask + 1)
9085 /* Number of bits left after the shift must be more than the mask
9086 needs. */
9087 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9088 <= GET_MODE_PRECISION (xmode))
9089 /* Must be more sign bit copies than the mask needs. */
9090 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9091 >= exact_log2 (mask + 1)))
9092 {
9093 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9094 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9095 gen_int_shift_amount (xmode, nbits));
9096 }
9097 goto shiftrt;
9098
9099 case ASHIFTRT:
9100 /* If we are just looking for the sign bit, we don't need this shift at
9101 all, even if it has a variable count. */
9102 if (val_signbit_p (xmode, mask))
9103 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9104
9105 /* If this is a shift by a constant, get a mask that contains those bits
9106 that are not copies of the sign bit. We then have two cases: If
9107 MASK only includes those bits, this can be a logical shift, which may
9108 allow simplifications. If MASK is a single-bit field not within
9109 those bits, we are requesting a copy of the sign bit and hence can
9110 shift the sign bit to the appropriate location. */
9111
9112 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9113 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9114 {
9115 unsigned HOST_WIDE_INT nonzero;
9116 int i;
9117
9118 /* If the considered data is wider than HOST_WIDE_INT, we can't
9119 represent a mask for all its bits in a single scalar.
9120 But we only care about the lower bits, so calculate these. */
9121
9122 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9123 {
9124 nonzero = HOST_WIDE_INT_M1U;
9125
9126 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9127 is the number of bits a full-width mask would have set.
9128 We need only shift if these are fewer than nonzero can
9129 hold. If not, we must keep all bits set in nonzero. */
9130
9131 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9132 < HOST_BITS_PER_WIDE_INT)
9133 nonzero >>= INTVAL (XEXP (x, 1))
9134 + HOST_BITS_PER_WIDE_INT
9135 - GET_MODE_PRECISION (xmode);
9136 }
9137 else
9138 {
9139 nonzero = GET_MODE_MASK (xmode);
9140 nonzero >>= INTVAL (XEXP (x, 1));
9141 }
9142
9143 if ((mask & ~nonzero) == 0)
9144 {
9145 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9146 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9147 if (GET_CODE (x) != ASHIFTRT)
9148 return force_to_mode (x, mode, mask, next_select);
9149 }
9150
9151 else if ((i = exact_log2 (mask)) >= 0)
9152 {
9153 x = simplify_shift_const
9154 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9155 GET_MODE_PRECISION (xmode) - 1 - i);
9156
9157 if (GET_CODE (x) != ASHIFTRT)
9158 return force_to_mode (x, mode, mask, next_select);
9159 }
9160 }
9161
9162 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9163 even if the shift count isn't a constant. */
9164 if (mask == 1)
9165 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9166
9167 shiftrt:
9168
9169 /* If this is a zero- or sign-extension operation that just affects bits
9170 we don't care about, remove it. Be sure the call above returned
9171 something that is still a shift. */
9172
9173 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9174 && CONST_INT_P (XEXP (x, 1))
9175 && INTVAL (XEXP (x, 1)) >= 0
9176 && (INTVAL (XEXP (x, 1))
9177 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9178 && GET_CODE (XEXP (x, 0)) == ASHIFT
9179 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9180 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9181 next_select);
9182
9183 break;
9184
9185 case ROTATE:
9186 case ROTATERT:
9187 /* If the shift count is constant and we can do computations
9188 in the mode of X, compute where the bits we care about are.
9189 Otherwise, we can't do anything. Don't change the mode of
9190 the shift or propagate MODE into the shift, though. */
9191 if (CONST_INT_P (XEXP (x, 1))
9192 && INTVAL (XEXP (x, 1)) >= 0)
9193 {
9194 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9195 xmode, gen_int_mode (mask, xmode),
9196 XEXP (x, 1));
9197 if (temp && CONST_INT_P (temp))
9198 x = simplify_gen_binary (code, xmode,
9199 force_to_mode (XEXP (x, 0), xmode,
9200 INTVAL (temp), next_select),
9201 XEXP (x, 1));
9202 }
9203 break;
9204
9205 case NEG:
9206 /* If we just want the low-order bit, the NEG isn't needed since it
9207 won't change the low-order bit. */
9208 if (mask == 1)
9209 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9210
9211 /* We need any bits less significant than the most significant bit in
9212 MASK since carries from those bits will affect the bits we are
9213 interested in. */
9214 mask = fuller_mask;
9215 goto unop;
9216
9217 case NOT:
9218 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9219 same as the XOR case above. Ensure that the constant we form is not
9220 wider than the mode of X. */
9221
9222 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9223 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9224 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9225 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9226 < GET_MODE_PRECISION (xmode))
9227 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9228 {
9229 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9230 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9231 x = simplify_gen_binary (LSHIFTRT, xmode,
9232 temp, XEXP (XEXP (x, 0), 1));
9233
9234 return force_to_mode (x, mode, mask, next_select);
9235 }
9236
9237 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9238 use the full mask inside the NOT. */
9239 mask = fuller_mask;
9240
9241 unop:
9242 op0 = gen_lowpart_or_truncate (op_mode,
9243 force_to_mode (XEXP (x, 0), mode, mask,
9244 next_select));
9245 if (op_mode != xmode || op0 != XEXP (x, 0))
9246 {
9247 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9248 xmode = op_mode;
9249 }
9250 break;
9251
9252 case NE:
9253 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9254 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9255 which is equal to STORE_FLAG_VALUE. */
9256 if ((mask & ~STORE_FLAG_VALUE) == 0
9257 && XEXP (x, 1) == const0_rtx
9258 && GET_MODE (XEXP (x, 0)) == mode
9259 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9260 && (nonzero_bits (XEXP (x, 0), mode)
9261 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9262 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9263
9264 break;
9265
9266 case IF_THEN_ELSE:
9267 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9268 written in a narrower mode. We play it safe and do not do so. */
9269
9270 op0 = gen_lowpart_or_truncate (xmode,
9271 force_to_mode (XEXP (x, 1), mode,
9272 mask, next_select));
9273 op1 = gen_lowpart_or_truncate (xmode,
9274 force_to_mode (XEXP (x, 2), mode,
9275 mask, next_select));
9276 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9277 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9278 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9279 op0, op1);
9280 break;
9281
9282 default:
9283 break;
9284 }
9285
9286 /* Ensure we return a value of the proper mode. */
9287 return gen_lowpart_or_truncate (mode, x);
9288 }
9289 \f
9290 /* Return nonzero if X is an expression that has one of two values depending on
9291 whether some other value is zero or nonzero. In that case, we return the
9292 value that is being tested, *PTRUE is set to the value if the rtx being
9293 returned has a nonzero value, and *PFALSE is set to the other alternative.
9294
9295 If we return zero, we set *PTRUE and *PFALSE to X. */
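/* For instance (register number made up for illustration), for
   X == (ne:SI (reg:SI 100) (const_int 0)) we return (reg:SI 100), with
   *PTRUE set to const_true_rtx and *PFALSE set to const0_rtx. */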
9296
9297 static rtx
9298 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9299 {
9300 machine_mode mode = GET_MODE (x);
9301 enum rtx_code code = GET_CODE (x);
9302 rtx cond0, cond1, true0, true1, false0, false1;
9303 unsigned HOST_WIDE_INT nz;
9304 scalar_int_mode int_mode;
9305
9306 /* If we are comparing a value against zero, we are done. */
9307 if ((code == NE || code == EQ)
9308 && XEXP (x, 1) == const0_rtx)
9309 {
9310 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9311 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9312 return XEXP (x, 0);
9313 }
9314
9315 /* If this is a unary operation whose operand has one of two values, apply
9316 our opcode to compute those values. */
9317 else if (UNARY_P (x)
9318 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9319 {
9320 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9321 *pfalse = simplify_gen_unary (code, mode, false0,
9322 GET_MODE (XEXP (x, 0)));
9323 return cond0;
9324 }
9325
9326 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9327 make can't possibly match and would suppress other optimizations. */
9328 else if (code == COMPARE)
9329 ;
9330
9331 /* If this is a binary operation, see if either side has only one of two
9332 values. If either one does or if both do and they are conditional on
9333 the same value, compute the new true and false values. */
9334 else if (BINARY_P (x))
9335 {
9336 rtx op0 = XEXP (x, 0);
9337 rtx op1 = XEXP (x, 1);
9338 cond0 = if_then_else_cond (op0, &true0, &false0);
9339 cond1 = if_then_else_cond (op1, &true1, &false1);
9340
9341 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9342 && (REG_P (op0) || REG_P (op1)))
9343 {
9344 /* Try to enable a simplification by undoing work done by
9345 if_then_else_cond if it converted a REG into something more
9346 complex. */
9347 if (REG_P (op0))
9348 {
9349 cond0 = 0;
9350 true0 = false0 = op0;
9351 }
9352 else
9353 {
9354 cond1 = 0;
9355 true1 = false1 = op1;
9356 }
9357 }
9358
9359 if ((cond0 != 0 || cond1 != 0)
9360 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9361 {
9362 /* If if_then_else_cond returned zero, then true/false are the
9363 same rtl. We must copy one of them to prevent invalid rtl
9364 sharing. */
9365 if (cond0 == 0)
9366 true0 = copy_rtx (true0);
9367 else if (cond1 == 0)
9368 true1 = copy_rtx (true1);
9369
9370 if (COMPARISON_P (x))
9371 {
9372 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9373 true0, true1);
9374 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9375 false0, false1);
9376 }
9377 else
9378 {
9379 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9380 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9381 }
9382
9383 return cond0 ? cond0 : cond1;
9384 }
9385
9386 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9387 operands is zero when the other is nonzero, and vice-versa,
9388 and STORE_FLAG_VALUE is 1 or -1. */
9389
9390 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9391 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9392 || code == UMAX)
9393 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9394 {
9395 rtx op0 = XEXP (XEXP (x, 0), 1);
9396 rtx op1 = XEXP (XEXP (x, 1), 1);
9397
9398 cond0 = XEXP (XEXP (x, 0), 0);
9399 cond1 = XEXP (XEXP (x, 1), 0);
9400
9401 if (COMPARISON_P (cond0)
9402 && COMPARISON_P (cond1)
9403 && SCALAR_INT_MODE_P (mode)
9404 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9405 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9406 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9407 || ((swap_condition (GET_CODE (cond0))
9408 == reversed_comparison_code (cond1, NULL))
9409 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9410 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9411 && ! side_effects_p (x))
9412 {
9413 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9414 *pfalse = simplify_gen_binary (MULT, mode,
9415 (code == MINUS
9416 ? simplify_gen_unary (NEG, mode,
9417 op1, mode)
9418 : op1),
9419 const_true_rtx);
9420 return cond0;
9421 }
9422 }
9423
9424 /* Similarly for MULT, AND and UMIN, except that for these the result
9425 is always zero. */
9426 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9427 && (code == MULT || code == AND || code == UMIN)
9428 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9429 {
9430 cond0 = XEXP (XEXP (x, 0), 0);
9431 cond1 = XEXP (XEXP (x, 1), 0);
9432
9433 if (COMPARISON_P (cond0)
9434 && COMPARISON_P (cond1)
9435 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9436 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9437 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9438 || ((swap_condition (GET_CODE (cond0))
9439 == reversed_comparison_code (cond1, NULL))
9440 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9441 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9442 && ! side_effects_p (x))
9443 {
9444 *ptrue = *pfalse = const0_rtx;
9445 return cond0;
9446 }
9447 }
9448 }
9449
9450 else if (code == IF_THEN_ELSE)
9451 {
9452 /* If we have IF_THEN_ELSE already, extract the condition and
9453 canonicalize it if it is NE or EQ. */
9454 cond0 = XEXP (x, 0);
9455 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9456 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9457 return XEXP (cond0, 0);
9458 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9459 {
9460 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9461 return XEXP (cond0, 0);
9462 }
9463 else
9464 return cond0;
9465 }
9466
9467 /* If X is a SUBREG, we can narrow both the true and false values
9468 of the inner expression, if there is a condition. */
9469 else if (code == SUBREG
9470 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9471 &false0)) != 0)
9472 {
9473 true0 = simplify_gen_subreg (mode, true0,
9474 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9475 false0 = simplify_gen_subreg (mode, false0,
9476 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9477 if (true0 && false0)
9478 {
9479 *ptrue = true0;
9480 *pfalse = false0;
9481 return cond0;
9482 }
9483 }
9484
9485 /* If X is a constant, this isn't special and will cause confusion
9486 if we treat it as such. Likewise if it is equivalent to a constant. */
9487 else if (CONSTANT_P (x)
9488 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9489 ;
9490
9491 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9492 will be least confusing to the rest of the compiler. */
9493 else if (mode == BImode)
9494 {
9495 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9496 return x;
9497 }
9498
9499 /* If X is known to be either 0 or -1, those are the true and
9500 false values when testing X. */
9501 else if (x == constm1_rtx || x == const0_rtx
9502 || (is_a <scalar_int_mode> (mode, &int_mode)
9503 && (num_sign_bit_copies (x, int_mode)
9504 == GET_MODE_PRECISION (int_mode))))
9505 {
9506 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9507 return x;
9508 }
9509
9510 /* Likewise for 0 or a single bit. */
9511 else if (HWI_COMPUTABLE_MODE_P (mode)
9512 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9513 {
9514 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9515 return x;
9516 }
9517
9518 /* Otherwise fail; show no condition with true and false values the same. */
9519 *ptrue = *pfalse = x;
9520 return 0;
9521 }
9522 \f
9523 /* Return the value of expression X given the fact that condition COND
9524 is known to be true when applied to REG as its first operand and VAL
9525 as its second. X is known to not be shared and so can be modified in
9526 place.
9527
9528 We only handle the simplest cases, and specifically those cases that
9529 arise with IF_THEN_ELSE expressions. */
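/* For instance (register number made up for illustration), if COND is GE,
   REG is (reg:SI 100), VAL is (const_int 0) and X is
   (abs:SI (reg:SI 100)), then the ABS is redundant and (reg:SI 100) is
   returned. */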
9530
9531 static rtx
9532 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9533 {
9534 enum rtx_code code = GET_CODE (x);
9535 const char *fmt;
9536 int i, j;
9537
9538 if (side_effects_p (x))
9539 return x;
9540
9541 /* If either operand of the condition is a floating point value,
9542 then we have to avoid collapsing an EQ comparison. */
9543 if (cond == EQ
9544 && rtx_equal_p (x, reg)
9545 && ! FLOAT_MODE_P (GET_MODE (x))
9546 && ! FLOAT_MODE_P (GET_MODE (val)))
9547 return val;
9548
9549 if (cond == UNEQ && rtx_equal_p (x, reg))
9550 return val;
9551
9552 /* If X is (abs REG) and we know something about REG's relationship
9553 with zero, we may be able to simplify this. */
9554
9555 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9556 switch (cond)
9557 {
9558 case GE: case GT: case EQ:
9559 return XEXP (x, 0);
9560 case LT: case LE:
9561 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9562 XEXP (x, 0),
9563 GET_MODE (XEXP (x, 0)));
9564 default:
9565 break;
9566 }
9567
9568 /* The only other cases we handle are MIN, MAX, and comparisons if the
9569 operands are the same as REG and VAL. */
9570
9571 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9572 {
9573 if (rtx_equal_p (XEXP (x, 0), val))
9574 {
9575 std::swap (val, reg);
9576 cond = swap_condition (cond);
9577 }
9578
9579 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9580 {
9581 if (COMPARISON_P (x))
9582 {
9583 if (comparison_dominates_p (cond, code))
9584 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9585
9586 code = reversed_comparison_code (x, NULL);
9587 if (code != UNKNOWN
9588 && comparison_dominates_p (cond, code))
9589 return CONST0_RTX (GET_MODE (x));
9590 else
9591 return x;
9592 }
9593 else if (code == SMAX || code == SMIN
9594 || code == UMIN || code == UMAX)
9595 {
9596 int unsignedp = (code == UMIN || code == UMAX);
9597
9598 /* Do not reverse the condition when it is NE or EQ.
9599 This is because we cannot conclude anything about
9600 the value of 'SMAX (x, y)' when x is not equal to y,
9601 but we can when x equals y. */
9602 if ((code == SMAX || code == UMAX)
9603 && ! (cond == EQ || cond == NE))
9604 cond = reverse_condition (cond);
9605
9606 switch (cond)
9607 {
9608 case GE: case GT:
9609 return unsignedp ? x : XEXP (x, 1);
9610 case LE: case LT:
9611 return unsignedp ? x : XEXP (x, 0);
9612 case GEU: case GTU:
9613 return unsignedp ? XEXP (x, 1) : x;
9614 case LEU: case LTU:
9615 return unsignedp ? XEXP (x, 0) : x;
9616 default:
9617 break;
9618 }
9619 }
9620 }
9621 }
9622 else if (code == SUBREG)
9623 {
9624 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9625 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9626
9627 if (SUBREG_REG (x) != r)
9628 {
9629 /* We must simplify subreg here, before we lose track of the
9630 original inner_mode. */
9631 new_rtx = simplify_subreg (GET_MODE (x), r,
9632 inner_mode, SUBREG_BYTE (x));
9633 if (new_rtx)
9634 return new_rtx;
9635 else
9636 SUBST (SUBREG_REG (x), r);
9637 }
9638
9639 return x;
9640 }
9641 /* We don't have to handle SIGN_EXTEND here, because even in the
9642 case of replacing something with a modeless CONST_INT, a
9643 CONST_INT is already (supposed to be) a valid sign extension for
9644 its narrower mode, which implies it's already properly
9645 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9646 story is different. */
9647 else if (code == ZERO_EXTEND)
9648 {
9649 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9650 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9651
9652 if (XEXP (x, 0) != r)
9653 {
9654 /* We must simplify the zero_extend here, before we lose
9655 track of the original inner_mode. */
9656 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9657 r, inner_mode);
9658 if (new_rtx)
9659 return new_rtx;
9660 else
9661 SUBST (XEXP (x, 0), r);
9662 }
9663
9664 return x;
9665 }
9666
9667 fmt = GET_RTX_FORMAT (code);
9668 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9669 {
9670 if (fmt[i] == 'e')
9671 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9672 else if (fmt[i] == 'E')
9673 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9674 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9675 cond, reg, val));
9676 }
9677
9678 return x;
9679 }
9680 \f
9681 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9682 assignment as a field assignment. */
9683
9684 static int
9685 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9686 {
9687 if (widen_x && GET_MODE (x) != GET_MODE (y))
9688 {
9689 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9690 return 0;
9691 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9692 return 0;
9693 x = adjust_address_nv (x, GET_MODE (y),
9694 byte_lowpart_offset (GET_MODE (y),
9695 GET_MODE (x)));
9696 }
9697
9698 if (x == y || rtx_equal_p (x, y))
9699 return 1;
9700
9701 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9702 return 0;
9703
9704 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9705 Note that all SUBREGs of MEM are paradoxical; otherwise they
9706 would have been rewritten. */
9707 if (MEM_P (x) && GET_CODE (y) == SUBREG
9708 && MEM_P (SUBREG_REG (y))
9709 && rtx_equal_p (SUBREG_REG (y),
9710 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9711 return 1;
9712
9713 if (MEM_P (y) && GET_CODE (x) == SUBREG
9714 && MEM_P (SUBREG_REG (x))
9715 && rtx_equal_p (SUBREG_REG (x),
9716 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9717 return 1;
9718
9719 /* We used to see if get_last_value of X and Y were the same but that's
9720 not correct. In one direction, we'll cause the assignment to have
9721 the wrong destination, and in the other direction we'll import a register into
9722 this insn that might already have been dead. So fail if none of the
9723 above cases are true. */
9724 return 0;
9725 }
9726 \f
9727 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9728 Return that assignment if so.
9729
9730 We only handle the most common cases. */
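/* For instance, a set of a single bit such as
     (set DEST (ior:SI (ashift:SI (const_int 1) POS) DEST))
   is rewritten as roughly
     (set (zero_extract:SI DEST (const_int 1) POS) (const_int 1)). */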
9731
9732 static rtx
9733 make_field_assignment (rtx x)
9734 {
9735 rtx dest = SET_DEST (x);
9736 rtx src = SET_SRC (x);
9737 rtx assign;
9738 rtx rhs, lhs;
9739 HOST_WIDE_INT c1;
9740 HOST_WIDE_INT pos;
9741 unsigned HOST_WIDE_INT len;
9742 rtx other;
9743
9744 /* All the rules in this function are specific to scalar integers. */
9745 scalar_int_mode mode;
9746 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9747 return x;
9748
9749 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9750 a clear of a one-bit field. We will have changed it to
9751 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9752 for a SUBREG. */
9753
9754 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9755 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9756 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9757 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9758 {
9759 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9760 1, 1, 1, 0);
9761 if (assign != 0)
9762 return gen_rtx_SET (assign, const0_rtx);
9763 return x;
9764 }
9765
9766 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9767 && subreg_lowpart_p (XEXP (src, 0))
9768 && partial_subreg_p (XEXP (src, 0))
9769 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9770 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9771 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9772 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9773 {
9774 assign = make_extraction (VOIDmode, dest, 0,
9775 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9776 1, 1, 1, 0);
9777 if (assign != 0)
9778 return gen_rtx_SET (assign, const0_rtx);
9779 return x;
9780 }
9781
9782 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9783 one-bit field. */
9784 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9785 && XEXP (XEXP (src, 0), 0) == const1_rtx
9786 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9787 {
9788 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9789 1, 1, 1, 0);
9790 if (assign != 0)
9791 return gen_rtx_SET (assign, const1_rtx);
9792 return x;
9793 }
9794
9795 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9796 SRC is an AND with all bits of that field set, then we can discard
9797 the AND. */
9798 if (GET_CODE (dest) == ZERO_EXTRACT
9799 && CONST_INT_P (XEXP (dest, 1))
9800 && GET_CODE (src) == AND
9801 && CONST_INT_P (XEXP (src, 1)))
9802 {
9803 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9804 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9805 unsigned HOST_WIDE_INT ze_mask;
9806
9807 if (width >= HOST_BITS_PER_WIDE_INT)
9808 ze_mask = -1;
9809 else
9810 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9811
9812 /* Complete overlap. We can remove the source AND. */
9813 if ((and_mask & ze_mask) == ze_mask)
9814 return gen_rtx_SET (dest, XEXP (src, 0));
9815
9816 /* Partial overlap. We can reduce the source AND. */
9817 if ((and_mask & ze_mask) != and_mask)
9818 {
9819 src = gen_rtx_AND (mode, XEXP (src, 0),
9820 gen_int_mode (and_mask & ze_mask, mode));
9821 return gen_rtx_SET (dest, src);
9822 }
9823 }
9824
9825 /* The other case we handle is assignments into a constant-position
9826 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9827 a mask that has all one bits except for a group of zero bits and
9828 OTHER is known to have zeros where C1 has ones, this is such an
9829 assignment. Compute the position and length from C1. Shift OTHER
9830 to the appropriate position, force it to the required mode, and
9831 make the extraction. Check for the AND in both operands. */
9832
9833 /* One or more SUBREGs might obscure the constant-position field
9834 assignment. The first one we are likely to encounter is an outer
9835 narrowing SUBREG, which we can just strip for the purposes of
9836 identifying the constant-field assignment. */
9837 scalar_int_mode src_mode = mode;
9838 if (GET_CODE (src) == SUBREG
9839 && subreg_lowpart_p (src)
9840 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9841 src = SUBREG_REG (src);
9842
9843 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9844 return x;
9845
9846 rhs = expand_compound_operation (XEXP (src, 0));
9847 lhs = expand_compound_operation (XEXP (src, 1));
9848
9849 if (GET_CODE (rhs) == AND
9850 && CONST_INT_P (XEXP (rhs, 1))
9851 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9852 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9853 /* The second SUBREG that might get in the way is a paradoxical
9854 SUBREG around the first operand of the AND. We want to
9855 pretend the operand is as wide as the destination here. We
9856 do this by adjusting the MEM to wider mode for the sole
9857 purpose of the call to rtx_equal_for_field_assignment_p. Also
9858 note this trick only works for MEMs. */
9859 else if (GET_CODE (rhs) == AND
9860 && paradoxical_subreg_p (XEXP (rhs, 0))
9861 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9862 && CONST_INT_P (XEXP (rhs, 1))
9863 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9864 dest, true))
9865 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9866 else if (GET_CODE (lhs) == AND
9867 && CONST_INT_P (XEXP (lhs, 1))
9868 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9869 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9870 /* The second SUBREG that might get in the way is a paradoxical
9871 SUBREG around the first operand of the AND. We want to
9872 pretend the operand is as wide as the destination here. We
9873 do this by adjusting the MEM to wider mode for the sole
9874 purpose of the call to rtx_equal_for_field_assignment_p. Also
9875 note this trick only works for MEMs. */
9876 else if (GET_CODE (lhs) == AND
9877 && paradoxical_subreg_p (XEXP (lhs, 0))
9878 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9879 && CONST_INT_P (XEXP (lhs, 1))
9880 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9881 dest, true))
9882 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9883 else
9884 return x;
9885
9886 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9887 if (pos < 0
9888 || pos + len > GET_MODE_PRECISION (mode)
9889 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9890 || (c1 & nonzero_bits (other, mode)) != 0)
9891 return x;
9892
9893 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9894 if (assign == 0)
9895 return x;
9896
9897 /* The mode to use for the source is the mode of the assignment, or of
9898 what is inside a possible STRICT_LOW_PART. */
9899 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9900 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9901
9902 /* Shift OTHER right POS places and make it the source, restricting it
9903 to the proper length and mode. */
9904
9905 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9906 src_mode, other, pos),
9907 dest);
9908 src = force_to_mode (src, new_mode,
9909 len >= HOST_BITS_PER_WIDE_INT
9910 ? HOST_WIDE_INT_M1U
9911 : (HOST_WIDE_INT_1U << len) - 1,
9912 0);
9913
9914 /* If SRC is masked by an AND that does not make a difference in
9915 the value being stored, strip it. */
9916 if (GET_CODE (assign) == ZERO_EXTRACT
9917 && CONST_INT_P (XEXP (assign, 1))
9918 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9919 && GET_CODE (src) == AND
9920 && CONST_INT_P (XEXP (src, 1))
9921 && UINTVAL (XEXP (src, 1))
9922 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9923 src = XEXP (src, 0);
9924
9925 return gen_rtx_SET (assign, src);
9926 }
9927 \f
9928 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9929 if so. */
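 /* For example, (ior (and A C) (and B C)) becomes (and (ior A B) C); the
    same reverse distribution is applied for the other inner/outer code
    pairs accepted below.  */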
9930
9931 static rtx
9932 apply_distributive_law (rtx x)
9933 {
9934 enum rtx_code code = GET_CODE (x);
9935 enum rtx_code inner_code;
9936 rtx lhs, rhs, other;
9937 rtx tem;
9938
9939 /* Distributivity is not true for floating point as it can change the
9940 value. So we don't do it unless -funsafe-math-optimizations. */
9941 if (FLOAT_MODE_P (GET_MODE (x))
9942 && ! flag_unsafe_math_optimizations)
9943 return x;
9944
9945 /* The outer operation can only be one of the following: */
9946 if (code != IOR && code != AND && code != XOR
9947 && code != PLUS && code != MINUS)
9948 return x;
9949
9950 lhs = XEXP (x, 0);
9951 rhs = XEXP (x, 1);
9952
9953 /* If either operand is a primitive we can't do anything, so get out
9954 fast. */
9955 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9956 return x;
9957
9958 lhs = expand_compound_operation (lhs);
9959 rhs = expand_compound_operation (rhs);
9960 inner_code = GET_CODE (lhs);
9961 if (inner_code != GET_CODE (rhs))
9962 return x;
9963
9964 /* See if the inner and outer operations distribute. */
9965 switch (inner_code)
9966 {
9967 case LSHIFTRT:
9968 case ASHIFTRT:
9969 case AND:
9970 case IOR:
9971 /* These all distribute except over PLUS. */
9972 if (code == PLUS || code == MINUS)
9973 return x;
9974 break;
9975
9976 case MULT:
9977 if (code != PLUS && code != MINUS)
9978 return x;
9979 break;
9980
9981 case ASHIFT:
9982 /* This is also a multiply, so it distributes over everything. */
9983 break;
9984
9985 /* This used to handle SUBREG, but this turned out to be counter-
9986 productive, since (subreg (op ...)) usually is not handled by
9987 insn patterns, and this "optimization" therefore transformed
9988 recognizable patterns into unrecognizable ones. Therefore the
9989 SUBREG case was removed from here.
9990
9991 It is possible that distributing SUBREG over arithmetic operations
9992 leads to an intermediate result that can then be optimized further,
9993 e.g. by moving the outer SUBREG to the other side of a SET as done
9994 in simplify_set. This seems to have been the original intent of
9995 handling SUBREGs here.
9996
9997 However, with current GCC this does not appear to actually happen,
9998 at least on major platforms. If some case is found where removing
9999 the SUBREG case here prevents follow-on optimizations, distributing
10000 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
10001
10002 default:
10003 return x;
10004 }
10005
10006 /* Set LHS and RHS to the inner operands (A and B in the example
10007 above) and set OTHER to the common operand (C in the example).
10008 There is only one way to do this unless the inner operation is
10009 commutative. */
10010 if (COMMUTATIVE_ARITH_P (lhs)
10011 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
10012 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
10013 else if (COMMUTATIVE_ARITH_P (lhs)
10014 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
10015 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
10016 else if (COMMUTATIVE_ARITH_P (lhs)
10017 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
10018 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
10019 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
10020 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
10021 else
10022 return x;
10023
10024 /* Form the new inner operation, seeing if it simplifies first. */
10025 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
10026
10027 /* There is one exception to the general way of distributing:
10028 (a | c) ^ (b | c) -> (a ^ b) & ~c */
10029 if (code == XOR && inner_code == IOR)
10030 {
10031 inner_code = AND;
10032 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
10033 }
10034
10035 /* We may be able to continue distributing the result, so call
10036 ourselves recursively on the inner operation before forming the
10037 outer operation, which we return. */
10038 return simplify_gen_binary (inner_code, GET_MODE (x),
10039 apply_distributive_law (tem), other);
10040 }
10041
10042 /* See if X is of the form (* (+ A B) C), and if so convert to
10043 (+ (* A C) (* B C)) and try to simplify.
10044
10045 Most of the time, this results in no change. However, if some of
10046 the operands are the same or inverses of each other, simplifications
10047 will result.
10048
10049 For example, (and (ior A B) (not B)) can occur as the result of
10050 expanding a bit field assignment. When we apply the distributive
10051 law to this, we get (ior (and A (not B)) (and B (not B))),
10052 which then simplifies to (and A (not B)).
10053
10054 Note that no checks happen on the validity of applying the inverse
10055 distributive law. Such checks would be pointless here, since they
10056 can be done in the few places where this routine is called.
10057
10058 N is the index of the term that is decomposed (the arithmetic operation,
10059 i.e. (+ A B) in the first example above). !N is the index of the term that
10060 is distributed, i.e. of C in the first example above. */
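 /* Concretely, with X == (and (ior A B) C) and N == 0, DECOMPOSED is
    (ior A B) and DISTRIBUTED is C; we form (ior (and A C) (and B C)), let
    apply_distributive_law fold it back, and keep the result only if it is
    cheaper than X.  */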
10061 static rtx
10062 distribute_and_simplify_rtx (rtx x, int n)
10063 {
10064 machine_mode mode;
10065 enum rtx_code outer_code, inner_code;
10066 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
10067
10068 /* Distributivity is not true for floating point as it can change the
10069 value. So we don't do it unless -funsafe-math-optimizations. */
10070 if (FLOAT_MODE_P (GET_MODE (x))
10071 && ! flag_unsafe_math_optimizations)
10072 return NULL_RTX;
10073
10074 decomposed = XEXP (x, n);
10075 if (!ARITHMETIC_P (decomposed))
10076 return NULL_RTX;
10077
10078 mode = GET_MODE (x);
10079 outer_code = GET_CODE (x);
10080 distributed = XEXP (x, !n);
10081
10082 inner_code = GET_CODE (decomposed);
10083 inner_op0 = XEXP (decomposed, 0);
10084 inner_op1 = XEXP (decomposed, 1);
10085
10086 /* Special case (and (xor B C) (not A)), which is equivalent to
10087 (xor (ior A B) (ior A C)) */
10088 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10089 {
10090 distributed = XEXP (distributed, 0);
10091 outer_code = IOR;
10092 }
10093
10094 if (n == 0)
10095 {
10096 /* Distribute the second term. */
10097 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10098 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10099 }
10100 else
10101 {
10102 /* Distribute the first term. */
10103 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10104 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10105 }
10106
10107 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10108 new_op0, new_op1));
10109 if (GET_CODE (tmp) != outer_code
10110 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10111 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10112 return tmp;
10113
10114 return NULL_RTX;
10115 }
10116 \f
10117 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10118 in MODE. Return an equivalent form, if different from (and VAROP
10119 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
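 /* A couple of simple outcomes of this routine: (and (const_int 0x1234)
    0xff) folds to (const_int 0x34), and an AND whose mask covers every bit
    that can be nonzero in VAROP is dropped, returning VAROP itself.  */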
10120
10121 static rtx
10122 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10123 unsigned HOST_WIDE_INT constop)
10124 {
10125 unsigned HOST_WIDE_INT nonzero;
10126 unsigned HOST_WIDE_INT orig_constop;
10127 rtx orig_varop;
10128 int i;
10129
10130 orig_varop = varop;
10131 orig_constop = constop;
10132 if (GET_CODE (varop) == CLOBBER)
10133 return NULL_RTX;
10134
10135 /* Simplify VAROP knowing that we will be only looking at some of the
10136 bits in it.
10137
10138 Note by passing in CONSTOP, we guarantee that the bits not set in
10139 CONSTOP are not significant and will never be examined. We must
10140 ensure that is the case by explicitly masking out those bits
10141 before returning. */
10142 varop = force_to_mode (varop, mode, constop, 0);
10143
10144 /* If VAROP is a CLOBBER, we will fail so return it. */
10145 if (GET_CODE (varop) == CLOBBER)
10146 return varop;
10147
10148 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10149 to VAROP and return the new constant. */
10150 if (CONST_INT_P (varop))
10151 return gen_int_mode (INTVAL (varop) & constop, mode);
10152
10153 /* See what bits may be nonzero in VAROP. Unlike the general case of
10154 a call to nonzero_bits, here we don't care about bits outside
10155 MODE. */
10156
10157 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10158
10159 /* Turn off all bits in the constant that are known to already be zero.
10160 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10161 which is tested below. */
10162
10163 constop &= nonzero;
10164
10165 /* If we don't have any bits left, return zero. */
10166 if (constop == 0)
10167 return const0_rtx;
10168
10169 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10170 a power of two, we can replace this with an ASHIFT. */
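 /* E.g. (and (neg X) (const_int 4)), with X known to be 0 or 1, is
    equivalent to (ashift X (const_int 2)).  */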
10171 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10172 && (i = exact_log2 (constop)) >= 0)
10173 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10174
10175 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10176 or XOR, then try to apply the distributive law. This may eliminate
10177 operations if either branch can be simplified because of the AND.
10178 It may also make some cases more complex, but those cases probably
10179 won't match a pattern either with or without this. */
10180
10181 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10182 {
10183 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10184 return
10185 gen_lowpart
10186 (mode,
10187 apply_distributive_law
10188 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10189 simplify_and_const_int (NULL_RTX, varop_mode,
10190 XEXP (varop, 0),
10191 constop),
10192 simplify_and_const_int (NULL_RTX, varop_mode,
10193 XEXP (varop, 1),
10194 constop))));
10195 }
10196
10197 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10198 the AND and see if one of the operands simplifies to zero. If so, we
10199 may eliminate it. */
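 /* E.g. (and (plus X (const_int 4)) (const_int 3)) reduces to
    (and X (const_int 3)), since adding 4 cannot change the low two bits.  */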
10200
10201 if (GET_CODE (varop) == PLUS
10202 && pow2p_hwi (constop + 1))
10203 {
10204 rtx o0, o1;
10205
10206 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10207 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10208 if (o0 == const0_rtx)
10209 return o1;
10210 if (o1 == const0_rtx)
10211 return o0;
10212 }
10213
10214 /* Make a SUBREG if necessary. If we can't make it, fail. */
10215 varop = gen_lowpart (mode, varop);
10216 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10217 return NULL_RTX;
10218
10219 /* If we are only masking insignificant bits, return VAROP. */
10220 if (constop == nonzero)
10221 return varop;
10222
10223 if (varop == orig_varop && constop == orig_constop)
10224 return NULL_RTX;
10225
10226 /* Otherwise, return an AND. */
10227 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10228 }
10229
10230
10231 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10232 in MODE.
10233
10234 Return an equivalent form, if different from X. Otherwise, return X. If
10235 X is zero, we are to always construct the equivalent form. */
10236
10237 static rtx
10238 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10239 unsigned HOST_WIDE_INT constop)
10240 {
10241 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10242 if (tem)
10243 return tem;
10244
10245 if (!x)
10246 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10247 gen_int_mode (constop, mode));
10248 if (GET_MODE (x) != mode)
10249 x = gen_lowpart (mode, x);
10250 return x;
10251 }
10252 \f
10253 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10254 We don't care about bits outside of those defined in MODE.
10255 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10256
10257 For most X this is simply GET_MODE_MASK (MODE), but if X is
10258 a shift, AND, or zero_extract, we can do better. */
10259
10260 static rtx
10261 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10262 scalar_int_mode mode,
10263 unsigned HOST_WIDE_INT *nonzero)
10264 {
10265 rtx tem;
10266 reg_stat_type *rsp;
10267
10268 /* If X is a register whose nonzero bits value is current, use it.
10269 Otherwise, if X is a register whose value we can find, use that
10270 value. Otherwise, use the previously-computed global nonzero bits
10271 for this register. */
10272
10273 rsp = &reg_stat[REGNO (x)];
10274 if (rsp->last_set_value != 0
10275 && (rsp->last_set_mode == mode
10276 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10277 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10278 && GET_MODE_CLASS (mode) == MODE_INT))
10279 && ((rsp->last_set_label >= label_tick_ebb_start
10280 && rsp->last_set_label < label_tick)
10281 || (rsp->last_set_label == label_tick
10282 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10283 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10284 && REGNO (x) < reg_n_sets_max
10285 && REG_N_SETS (REGNO (x)) == 1
10286 && !REGNO_REG_SET_P
10287 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10288 REGNO (x)))))
10289 {
10290 /* Note that, even if the precision of last_set_mode is lower than that
10291 of mode, record_value_for_reg invoked nonzero_bits on the register
10292 with nonzero_bits_mode (because last_set_mode is necessarily integral
10293 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10294 are all valid, hence in mode too since nonzero_bits_mode is defined
10295 to the largest HWI_COMPUTABLE_MODE_P mode. */
10296 *nonzero &= rsp->last_set_nonzero_bits;
10297 return NULL;
10298 }
10299
10300 tem = get_last_value (x);
10301 if (tem)
10302 {
10303 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10304 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10305
10306 return tem;
10307 }
10308
10309 if (nonzero_sign_valid && rsp->nonzero_bits)
10310 {
10311 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10312
10313 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10314 /* We don't know anything about the upper bits. */
10315 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10316
10317 *nonzero &= mask;
10318 }
10319
10320 return NULL;
10321 }
10322
10323 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10324 end of X that are known to be equal to the sign bit. X will be used
10325 in mode MODE; the returned value will always be between 1 and the
10326 number of bits in MODE. */
10327
10328 static rtx
10329 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10330 scalar_int_mode mode,
10331 unsigned int *result)
10332 {
10333 rtx tem;
10334 reg_stat_type *rsp;
10335
10336 rsp = &reg_stat[REGNO (x)];
10337 if (rsp->last_set_value != 0
10338 && rsp->last_set_mode == mode
10339 && ((rsp->last_set_label >= label_tick_ebb_start
10340 && rsp->last_set_label < label_tick)
10341 || (rsp->last_set_label == label_tick
10342 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10343 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10344 && REGNO (x) < reg_n_sets_max
10345 && REG_N_SETS (REGNO (x)) == 1
10346 && !REGNO_REG_SET_P
10347 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10348 REGNO (x)))))
10349 {
10350 *result = rsp->last_set_sign_bit_copies;
10351 return NULL;
10352 }
10353
10354 tem = get_last_value (x);
10355 if (tem != 0)
10356 return tem;
10357
10358 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10359 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10360 *result = rsp->sign_bit_copies;
10361
10362 return NULL;
10363 }
10364 \f
10365 /* Return the number of "extended" bits there are in X, when interpreted
10366 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10367 unsigned quantities, this is the number of high-order zero bits.
10368 For signed quantities, this is the number of copies of the sign bit
10369 minus 1. In both cases, this function returns the number of "spare"
10370 bits. For example, if two quantities for which this function returns
10371 at least 1 are added, the addition is known not to overflow.
10372
10373 This function will always return 0 unless called during combine, which
10374 implies that it must be called from a define_split. */
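 /* For instance, if X is an SImode value whose nonzero bits are known to
    fit in the low eight bits, the unsigned count is 31 - 7 == 24 "spare"
    high-order zero bits.  */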
10375
10376 unsigned int
10377 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10378 {
10379 if (nonzero_sign_valid == 0)
10380 return 0;
10381
10382 scalar_int_mode int_mode;
10383 return (unsignedp
10384 ? (is_a <scalar_int_mode> (mode, &int_mode)
10385 && HWI_COMPUTABLE_MODE_P (int_mode)
10386 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10387 - floor_log2 (nonzero_bits (x, int_mode)))
10388 : 0)
10389 : num_sign_bit_copies (x, mode) - 1);
10390 }
10391
10392 /* This function is called from `simplify_shift_const' to merge two
10393 outer operations. Specifically, we have already found that we need
10394 to perform operation *POP0 with constant *PCONST0 at the outermost
10395 position. We would now like to also perform OP1 with constant CONST1
10396 (with *POP0 being done last).
10397
10398 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10399 the resulting operation. *PCOMP_P is set to 1 if we would need to
10400 complement the innermost operand, otherwise it is unchanged.
10401
10402 MODE is the mode in which the operation will be done. No bits outside
10403 the width of this mode matter. It is assumed that the width of this mode
10404 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10405
10406 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10407 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10408 result is simply *PCONST0.
10409
10410 If the resulting operation cannot be expressed as one operation, we
10411 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
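 /* As a small example, if *POP0 is XOR with *PCONST0 == C and OP1 is also
    XOR with CONST1 == C, the constants cancel and *POP0 becomes UNKNOWN,
    i.e. no outer operation is needed at all.  */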
10412
10413 static int
10414 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10415 {
10416 enum rtx_code op0 = *pop0;
10417 HOST_WIDE_INT const0 = *pconst0;
10418
10419 const0 &= GET_MODE_MASK (mode);
10420 const1 &= GET_MODE_MASK (mode);
10421
10422 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10423 if (op0 == AND)
10424 const1 &= const0;
10425
10426 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10427 if OP0 is SET. */
10428
10429 if (op1 == UNKNOWN || op0 == SET)
10430 return 1;
10431
10432 else if (op0 == UNKNOWN)
10433 op0 = op1, const0 = const1;
10434
10435 else if (op0 == op1)
10436 {
10437 switch (op0)
10438 {
10439 case AND:
10440 const0 &= const1;
10441 break;
10442 case IOR:
10443 const0 |= const1;
10444 break;
10445 case XOR:
10446 const0 ^= const1;
10447 break;
10448 case PLUS:
10449 const0 += const1;
10450 break;
10451 case NEG:
10452 op0 = UNKNOWN;
10453 break;
10454 default:
10455 break;
10456 }
10457 }
10458
10459 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10460 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10461 return 0;
10462
10463 /* If the two constants aren't the same, we can't do anything. The
10464 remaining six cases can all be done. */
10465 else if (const0 != const1)
10466 return 0;
10467
10468 else
10469 switch (op0)
10470 {
10471 case IOR:
10472 if (op1 == AND)
10473 /* (a & b) | b == b */
10474 op0 = SET;
10475 else /* op1 == XOR */
10476 /* (a ^ b) | b == a | b */
10477 {;}
10478 break;
10479
10480 case XOR:
10481 if (op1 == AND)
10482 /* (a & b) ^ b == (~a) & b */
10483 op0 = AND, *pcomp_p = 1;
10484 else /* op1 == IOR */
10485 /* (a | b) ^ b == a & ~b */
10486 op0 = AND, const0 = ~const0;
10487 break;
10488
10489 case AND:
10490 if (op1 == IOR)
10491 /* (a | b) & b == b */
10492 op0 = SET;
10493 else /* op1 == XOR */
10494 /* (a ^ b) & b == (~a) & b */
10495 *pcomp_p = 1;
10496 break;
10497 default:
10498 break;
10499 }
10500
10501 /* Check for NO-OP cases. */
10502 const0 &= GET_MODE_MASK (mode);
10503 if (const0 == 0
10504 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10505 op0 = UNKNOWN;
10506 else if (const0 == 0 && op0 == AND)
10507 op0 = SET;
10508 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10509 && op0 == AND)
10510 op0 = UNKNOWN;
10511
10512 *pop0 = op0;
10513
10514 /* ??? Slightly redundant with the above mask, but not entirely.
10515 Moving this above means we'd have to sign-extend the mode mask
10516 for the final test. */
10517 if (op0 != UNKNOWN && op0 != NEG)
10518 *pconst0 = trunc_int_for_mode (const0, mode);
10519
10520 return 1;
10521 }
10522 \f
10523 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10524 the shift in. The original shift operation CODE is performed on OP in
10525 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10526 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10527 result of the shift is subject to operation OUTER_CODE with operand
10528 OUTER_CONST. */
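 /* For instance, an LSHIFTRT originally done in QImode can be widened to
    SImode when the SImode nonzero bits of OP already fit within the QImode
    mask, or when a following AND (OUTER_CODE/OUTER_CONST) masks off every
    bit the widening could bring in.  */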
10529
10530 static scalar_int_mode
10531 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10532 scalar_int_mode orig_mode, scalar_int_mode mode,
10533 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10534 {
10535 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10536
10537 /* In general we can't perform the operation in a wider mode for right shifts and rotates. */
10538 switch (code)
10539 {
10540 case ASHIFTRT:
10541 /* We can still widen if the bits brought in from the left are identical
10542 to the sign bit of ORIG_MODE. */
10543 if (num_sign_bit_copies (op, mode)
10544 > (unsigned) (GET_MODE_PRECISION (mode)
10545 - GET_MODE_PRECISION (orig_mode)))
10546 return mode;
10547 return orig_mode;
10548
10549 case LSHIFTRT:
10550 /* Similarly here but with zero bits. */
10551 if (HWI_COMPUTABLE_MODE_P (mode)
10552 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10553 return mode;
10554
10555 /* We can also widen if the bits brought in will be masked off. This
10556 operation is performed in ORIG_MODE. */
10557 if (outer_code == AND)
10558 {
10559 int care_bits = low_bitmask_len (orig_mode, outer_const);
10560
10561 if (care_bits >= 0
10562 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10563 return mode;
10564 }
10565 /* fall through */
10566
10567 case ROTATE:
10568 return orig_mode;
10569
10570 case ROTATERT:
10571 gcc_unreachable ();
10572
10573 default:
10574 return mode;
10575 }
10576 }
10577
10578 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10579 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10580 if we cannot simplify it. Otherwise, return a simplified value.
10581
10582 The shift is normally computed in the widest mode we find in VAROP, as
10583 long as it isn't a different number of words than RESULT_MODE. Exceptions
10584 are ASHIFTRT and ROTATE, which are always done in their original mode. */
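 /* Two typical simplifications made here: (ashift (ashift X 2) 3) becomes
    (ashift X 5), and (lshiftrt (ashift:SI X 3) 3) becomes
    (and:SI X (const_int 0x1fffffff)) when nothing more is known about X.  */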
10585
10586 static rtx
10587 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10588 rtx varop, int orig_count)
10589 {
10590 enum rtx_code orig_code = code;
10591 rtx orig_varop = varop;
10592 int count, log2;
10593 machine_mode mode = result_mode;
10594 machine_mode shift_mode;
10595 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10596 /* We form (outer_op (code varop count) (outer_const)). */
10597 enum rtx_code outer_op = UNKNOWN;
10598 HOST_WIDE_INT outer_const = 0;
10599 int complement_p = 0;
10600 rtx new_rtx, x;
10601
10602 /* Make sure to truncate the "natural" shift on the way in. We don't
10603 want to do this inside the loop as it makes it more difficult to
10604 combine shifts. */
10605 if (SHIFT_COUNT_TRUNCATED)
10606 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10607
10608 /* If we were given an invalid count, don't do anything except exactly
10609 what was requested. */
10610
10611 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10612 return NULL_RTX;
10613
10614 count = orig_count;
10615
10616 /* Unless one of the branches of the `if' in this loop does a `continue',
10617 we will `break' the loop after the `if'. */
10618
10619 while (count != 0)
10620 {
10621 /* If we have an operand of (clobber (const_int 0)), fail. */
10622 if (GET_CODE (varop) == CLOBBER)
10623 return NULL_RTX;
10624
10625 /* Convert ROTATERT to ROTATE. */
10626 if (code == ROTATERT)
10627 {
10628 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10629 code = ROTATE;
10630 count = bitsize - count;
10631 }
10632
10633 shift_mode = result_mode;
10634 if (shift_mode != mode)
10635 {
10636 /* We only change the modes of scalar shifts. */
10637 int_mode = as_a <scalar_int_mode> (mode);
10638 int_result_mode = as_a <scalar_int_mode> (result_mode);
10639 shift_mode = try_widen_shift_mode (code, varop, count,
10640 int_result_mode, int_mode,
10641 outer_op, outer_const);
10642 }
10643
10644 scalar_int_mode shift_unit_mode
10645 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10646
10647 /* Handle cases where the count is greater than the size of the mode
10648 minus 1. For ASHIFTRT, use the size minus one as the count (this can
10649 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10650 take the count modulo the size. For other shifts, the result is
10651 zero.
10652
10653 Since these shifts are being produced by the compiler by combining
10654 multiple operations, each of which are defined, we know what the
10655 result is supposed to be. */
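 /* For example, if combining nested shifts produces an SImode logical right
    shift by 40, the shifted value is known to be zero (any pending outer
    operation still applies); an SImode rotate by 40 is reduced to a rotate
    by 8.  */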
10656
10657 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10658 {
10659 if (code == ASHIFTRT)
10660 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10661 else if (code == ROTATE || code == ROTATERT)
10662 count %= GET_MODE_PRECISION (shift_unit_mode);
10663 else
10664 {
10665 /* We can't simply return zero because there may be an
10666 outer op. */
10667 varop = const0_rtx;
10668 count = 0;
10669 break;
10670 }
10671 }
10672
10673 /* If we discovered we had to complement VAROP, leave. Making a NOT
10674 here would cause an infinite loop. */
10675 if (complement_p)
10676 break;
10677
10678 if (shift_mode == shift_unit_mode)
10679 {
10680 /* An arithmetic right shift of a quantity known to be -1 or 0
10681 is a no-op. */
10682 if (code == ASHIFTRT
10683 && (num_sign_bit_copies (varop, shift_unit_mode)
10684 == GET_MODE_PRECISION (shift_unit_mode)))
10685 {
10686 count = 0;
10687 break;
10688 }
10689
10690 /* If we are doing an arithmetic right shift and discarding all but
10691 the sign bit copies, this is equivalent to doing a shift by the
10692 bitsize minus one. Convert it into that shift because it will
10693 often allow other simplifications. */
10694
10695 if (code == ASHIFTRT
10696 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10697 >= GET_MODE_PRECISION (shift_unit_mode)))
10698 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10699
10700 /* We simplify the tests below and elsewhere by converting
10701 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10702 `make_compound_operation' will convert it to an ASHIFTRT for
10703 those machines (such as VAX) that don't have an LSHIFTRT. */
10704 if (code == ASHIFTRT
10705 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10706 && val_signbit_known_clear_p (shift_unit_mode,
10707 nonzero_bits (varop,
10708 shift_unit_mode)))
10709 code = LSHIFTRT;
10710
10711 if (((code == LSHIFTRT
10712 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10713 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10714 || (code == ASHIFT
10715 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10716 && !((nonzero_bits (varop, shift_unit_mode) << count)
10717 & GET_MODE_MASK (shift_unit_mode))))
10718 && !side_effects_p (varop))
10719 varop = const0_rtx;
10720 }
10721
10722 switch (GET_CODE (varop))
10723 {
10724 case SIGN_EXTEND:
10725 case ZERO_EXTEND:
10726 case SIGN_EXTRACT:
10727 case ZERO_EXTRACT:
10728 new_rtx = expand_compound_operation (varop);
10729 if (new_rtx != varop)
10730 {
10731 varop = new_rtx;
10732 continue;
10733 }
10734 break;
10735
10736 case MEM:
10737 /* The following rules apply only to scalars. */
10738 if (shift_mode != shift_unit_mode)
10739 break;
10740 int_mode = as_a <scalar_int_mode> (mode);
10741
10742 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10743 minus the width of a smaller mode, we can do this with a
10744 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10745 if ((code == ASHIFTRT || code == LSHIFTRT)
10746 && ! mode_dependent_address_p (XEXP (varop, 0),
10747 MEM_ADDR_SPACE (varop))
10748 && ! MEM_VOLATILE_P (varop)
10749 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10750 .exists (&tmode)))
10751 {
10752 new_rtx = adjust_address_nv (varop, tmode,
10753 BYTES_BIG_ENDIAN ? 0
10754 : count / BITS_PER_UNIT);
10755
10756 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10757 : ZERO_EXTEND, int_mode, new_rtx);
10758 count = 0;
10759 continue;
10760 }
10761 break;
10762
10763 case SUBREG:
10764 /* The following rules apply only to scalars. */
10765 if (shift_mode != shift_unit_mode)
10766 break;
10767 int_mode = as_a <scalar_int_mode> (mode);
10768 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10769
10770 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10771 the same number of words as what we've seen so far. Then store
10772 the widest mode in MODE. */
10773 if (subreg_lowpart_p (varop)
10774 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10775 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10776 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10777 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10778 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10779 {
10780 varop = SUBREG_REG (varop);
10781 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10782 mode = inner_mode;
10783 continue;
10784 }
10785 break;
10786
10787 case MULT:
10788 /* Some machines use MULT instead of ASHIFT because MULT
10789 is cheaper. But it is still better on those machines to
10790 merge two shifts into one. */
10791 if (CONST_INT_P (XEXP (varop, 1))
10792 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10793 {
10794 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10795 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10796 XEXP (varop, 0), log2_rtx);
10797 continue;
10798 }
10799 break;
10800
10801 case UDIV:
10802 /* Similar, for when divides are cheaper. */
10803 if (CONST_INT_P (XEXP (varop, 1))
10804 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10805 {
10806 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10807 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10808 XEXP (varop, 0), log2_rtx);
10809 continue;
10810 }
10811 break;
10812
10813 case ASHIFTRT:
10814 /* If we are extracting just the sign bit of an arithmetic
10815 right shift, that shift is not needed. However, the sign
10816 bit of a wider mode may be different from what would be
10817 interpreted as the sign bit in a narrower mode, so, if
10818 the result is narrower, don't discard the shift. */
10819 if (code == LSHIFTRT
10820 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10821 && (GET_MODE_UNIT_BITSIZE (result_mode)
10822 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10823 {
10824 varop = XEXP (varop, 0);
10825 continue;
10826 }
10827
10828 /* fall through */
10829
10830 case LSHIFTRT:
10831 case ASHIFT:
10832 case ROTATE:
10833 /* The following rules apply only to scalars. */
10834 if (shift_mode != shift_unit_mode)
10835 break;
10836 int_mode = as_a <scalar_int_mode> (mode);
10837 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10838 int_result_mode = as_a <scalar_int_mode> (result_mode);
10839
10840 /* Here we have two nested shifts. The result is usually the
10841 AND of a new shift with a mask. We compute the result below. */
10842 if (CONST_INT_P (XEXP (varop, 1))
10843 && INTVAL (XEXP (varop, 1)) >= 0
10844 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10845 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10846 && HWI_COMPUTABLE_MODE_P (int_mode))
10847 {
10848 enum rtx_code first_code = GET_CODE (varop);
10849 unsigned int first_count = INTVAL (XEXP (varop, 1));
10850 unsigned HOST_WIDE_INT mask;
10851 rtx mask_rtx;
10852
10853 /* We have one common special case. We can't do any merging if
10854 the inner code is an ASHIFTRT of a smaller mode. However, if
10855 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10856 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10857 we can convert it to
10858 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10859 This simplifies certain SIGN_EXTEND operations. */
10860 if (code == ASHIFT && first_code == ASHIFTRT
10861 && count == (GET_MODE_PRECISION (int_result_mode)
10862 - GET_MODE_PRECISION (int_varop_mode)))
10863 {
10864 /* C3 has the low-order C1 bits zero. */
10865
10866 mask = GET_MODE_MASK (int_mode)
10867 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10868
10869 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10870 XEXP (varop, 0), mask);
10871 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10872 int_result_mode, varop, count);
10873 count = first_count;
10874 code = ASHIFTRT;
10875 continue;
10876 }
10877
10878 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10879 than C1 high-order bits equal to the sign bit, we can convert
10880 this to either an ASHIFT or an ASHIFTRT depending on the
10881 two counts.
10882
10883 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10884
10885 if (code == ASHIFTRT && first_code == ASHIFT
10886 && int_varop_mode == shift_unit_mode
10887 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10888 > first_count))
10889 {
10890 varop = XEXP (varop, 0);
10891 count -= first_count;
10892 if (count < 0)
10893 {
10894 count = -count;
10895 code = ASHIFT;
10896 }
10897
10898 continue;
10899 }
10900
10901 /* There are some cases we can't do. If CODE is ASHIFTRT,
10902 we can only do this if FIRST_CODE is also ASHIFTRT.
10903
10904 We can't do the case when CODE is ROTATE and FIRST_CODE is
10905 ASHIFTRT.
10906
10907 If the mode of this shift is not the mode of the outer shift,
10908 we can't do this if either shift is a right shift or ROTATE.
10909
10910 Finally, we can't do any of these if the mode is too wide
10911 unless the codes are the same.
10912
10913 Handle the case where the shift codes are the same
10914 first. */
10915
10916 if (code == first_code)
10917 {
10918 if (int_varop_mode != int_result_mode
10919 && (code == ASHIFTRT || code == LSHIFTRT
10920 || code == ROTATE))
10921 break;
10922
10923 count += first_count;
10924 varop = XEXP (varop, 0);
10925 continue;
10926 }
10927
10928 if (code == ASHIFTRT
10929 || (code == ROTATE && first_code == ASHIFTRT)
10930 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10931 || (int_varop_mode != int_result_mode
10932 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10933 || first_code == ROTATE
10934 || code == ROTATE)))
10935 break;
10936
10937 /* To compute the mask to apply after the shift, shift the
10938 nonzero bits of the inner shift the same way the
10939 outer shift will. */
10940
10941 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10942 int_result_mode);
10943 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10944 mask_rtx
10945 = simplify_const_binary_operation (code, int_result_mode,
10946 mask_rtx, count_rtx);
10947
10948 /* Give up if we can't compute an outer operation to use. */
10949 if (mask_rtx == 0
10950 || !CONST_INT_P (mask_rtx)
10951 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10952 INTVAL (mask_rtx),
10953 int_result_mode, &complement_p))
10954 break;
10955
10956 /* If the shifts are in the same direction, we add the
10957 counts. Otherwise, we subtract them. */
10958 if ((code == ASHIFTRT || code == LSHIFTRT)
10959 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10960 count += first_count;
10961 else
10962 count -= first_count;
10963
10964 /* If COUNT is positive, the new shift is usually CODE,
10965 except in the two cases below, where it is
10966 FIRST_CODE. If the count is negative, FIRST_CODE should
10967 always be used. */
10968 if (count > 0
10969 && ((first_code == ROTATE && code == ASHIFT)
10970 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10971 code = first_code;
10972 else if (count < 0)
10973 code = first_code, count = -count;
10974
10975 varop = XEXP (varop, 0);
10976 continue;
10977 }
10978
10979 /* If we have (A << B << C) for any shift, we can convert this to
10980 (A << C << B). This wins if A is a constant. Only try this if
10981 B is not a constant. */
10982
10983 else if (GET_CODE (varop) == code
10984 && CONST_INT_P (XEXP (varop, 0))
10985 && !CONST_INT_P (XEXP (varop, 1)))
10986 {
10987 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10988 sure the result will be masked. See PR70222. */
10989 if (code == LSHIFTRT
10990 && int_mode != int_result_mode
10991 && !merge_outer_ops (&outer_op, &outer_const, AND,
10992 GET_MODE_MASK (int_result_mode)
10993 >> orig_count, int_result_mode,
10994 &complement_p))
10995 break;
10996 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10997 up outer sign extension (often left and right shift) is
10998 hardly more efficient than the original. See PR70429. */
10999 if (code == ASHIFTRT && int_mode != int_result_mode)
11000 break;
11001
11002 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
11003 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
11004 XEXP (varop, 0),
11005 count_rtx);
11006 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
11007 count = 0;
11008 continue;
11009 }
11010 break;
11011
11012 case NOT:
11013 /* The following rules apply only to scalars. */
11014 if (shift_mode != shift_unit_mode)
11015 break;
11016
11017 /* Make this fit the case below. */
11018 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
11019 continue;
11020
11021 case IOR:
11022 case AND:
11023 case XOR:
11024 /* The following rules apply only to scalars. */
11025 if (shift_mode != shift_unit_mode)
11026 break;
11027 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11028 int_result_mode = as_a <scalar_int_mode> (result_mode);
11029
11030 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
11031 with C the size of VAROP - 1 and the shift is logical if
11032 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11033 we have an (le X 0) operation. If we have an arithmetic shift
11034 and STORE_FLAG_VALUE is 1 or we have a logical shift with
11035 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
11036
11037 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
11038 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
11039 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11040 && (code == LSHIFTRT || code == ASHIFTRT)
11041 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11042 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11043 {
11044 count = 0;
11045 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
11046 const0_rtx);
11047
11048 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11049 varop = gen_rtx_NEG (int_varop_mode, varop);
11050
11051 continue;
11052 }
11053
11054 /* If we have (shift (logical)), move the logical to the outside
11055 to allow it to possibly combine with another logical and the
11056 shift to combine with another shift. This also canonicalizes to
11057 what a ZERO_EXTRACT looks like. Also, some machines have
11058 (and (shift)) insns. */
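 /* E.g. (lshiftrt (and X C) N) is handled here by shifting the constant,
    giving (lshiftrt X N) with an outer (and ... (C >> N)) recorded in
    OUTER_OP/OUTER_CONST.  */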
11059
11060 if (CONST_INT_P (XEXP (varop, 1))
11061 /* We can't do this if we have (ashiftrt (xor)) and the
11062 constant has its sign bit set in shift_unit_mode with
11063 shift_unit_mode wider than result_mode. */
11064 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11065 && int_result_mode != shift_unit_mode
11066 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11067 shift_unit_mode) < 0)
11068 && (new_rtx = simplify_const_binary_operation
11069 (code, int_result_mode,
11070 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11071 gen_int_shift_amount (int_result_mode, count))) != 0
11072 && CONST_INT_P (new_rtx)
11073 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
11074 INTVAL (new_rtx), int_result_mode,
11075 &complement_p))
11076 {
11077 varop = XEXP (varop, 0);
11078 continue;
11079 }
11080
11081 /* If we can't do that, try to simplify the shift in each arm of the
11082 logical expression, make a new logical expression, and apply
11083 the inverse distributive law. This also can't be done for
11084 (ashiftrt (xor)) where we've widened the shift and the constant
11085 changes the sign bit. */
11086 if (CONST_INT_P (XEXP (varop, 1))
11087 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11088 && int_result_mode != shift_unit_mode
11089 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11090 shift_unit_mode) < 0))
11091 {
11092 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11093 XEXP (varop, 0), count);
11094 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11095 XEXP (varop, 1), count);
11096
11097 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11098 lhs, rhs);
11099 varop = apply_distributive_law (varop);
11100
11101 count = 0;
11102 continue;
11103 }
11104 break;
11105
11106 case EQ:
11107 /* The following rules apply only to scalars. */
11108 if (shift_mode != shift_unit_mode)
11109 break;
11110 int_result_mode = as_a <scalar_int_mode> (result_mode);
11111
11112 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11113 says that the sign bit can be tested, FOO has mode MODE, C is
11114 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11115 that may be nonzero. */
11116 if (code == LSHIFTRT
11117 && XEXP (varop, 1) == const0_rtx
11118 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11119 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11120 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11121 && STORE_FLAG_VALUE == -1
11122 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11123 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11124 int_result_mode, &complement_p))
11125 {
11126 varop = XEXP (varop, 0);
11127 count = 0;
11128 continue;
11129 }
11130 break;
11131
11132 case NEG:
11133 /* The following rules apply only to scalars. */
11134 if (shift_mode != shift_unit_mode)
11135 break;
11136 int_result_mode = as_a <scalar_int_mode> (result_mode);
11137
11138 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11139 than the number of bits in the mode is equivalent to A. */
11140 if (code == LSHIFTRT
11141 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11142 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11143 {
11144 varop = XEXP (varop, 0);
11145 count = 0;
11146 continue;
11147 }
11148
11149 /* NEG commutes with ASHIFT since it is multiplication. Move the
11150 NEG outside to allow shifts to combine. */
11151 if (code == ASHIFT
11152 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11153 int_result_mode, &complement_p))
11154 {
11155 varop = XEXP (varop, 0);
11156 continue;
11157 }
11158 break;
11159
11160 case PLUS:
11161 /* The following rules apply only to scalars. */
11162 if (shift_mode != shift_unit_mode)
11163 break;
11164 int_result_mode = as_a <scalar_int_mode> (result_mode);
11165
11166 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11167 is one less than the number of bits in the mode is
11168 equivalent to (xor A 1). */
11169 if (code == LSHIFTRT
11170 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11171 && XEXP (varop, 1) == constm1_rtx
11172 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11173 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11174 int_result_mode, &complement_p))
11175 {
11176 count = 0;
11177 varop = XEXP (varop, 0);
11178 continue;
11179 }
11180
11181 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11182 that might be nonzero in BAR are those being shifted out and those
11183 bits are known zero in FOO, we can replace the PLUS with FOO.
11184 Similarly in the other operand order. This code occurs when
11185 we are computing the size of a variable-size array. */
11186
11187 if ((code == ASHIFTRT || code == LSHIFTRT)
11188 && count < HOST_BITS_PER_WIDE_INT
11189 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11190 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11191 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11192 {
11193 varop = XEXP (varop, 0);
11194 continue;
11195 }
11196 else if ((code == ASHIFTRT || code == LSHIFTRT)
11197 && count < HOST_BITS_PER_WIDE_INT
11198 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11199 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11200 >> count) == 0
11201 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11202 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11203 {
11204 varop = XEXP (varop, 1);
11205 continue;
11206 }
11207
11208 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11209 if (code == ASHIFT
11210 && CONST_INT_P (XEXP (varop, 1))
11211 && (new_rtx = simplify_const_binary_operation
11212 (ASHIFT, int_result_mode,
11213 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11214 gen_int_shift_amount (int_result_mode, count))) != 0
11215 && CONST_INT_P (new_rtx)
11216 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11217 INTVAL (new_rtx), int_result_mode,
11218 &complement_p))
11219 {
11220 varop = XEXP (varop, 0);
11221 continue;
11222 }
11223
11224 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11225 signbit', and attempt to change the PLUS to an XOR and move it to
11226 the outer operation, as is done above in the AND/IOR/XOR case
11227 for shift (logical). See the logical handling above for the
11228 reasoning behind doing so. */
11229 if (code == LSHIFTRT
11230 && CONST_INT_P (XEXP (varop, 1))
11231 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11232 && (new_rtx = simplify_const_binary_operation
11233 (code, int_result_mode,
11234 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11235 gen_int_shift_amount (int_result_mode, count))) != 0
11236 && CONST_INT_P (new_rtx)
11237 && merge_outer_ops (&outer_op, &outer_const, XOR,
11238 INTVAL (new_rtx), int_result_mode,
11239 &complement_p))
11240 {
11241 varop = XEXP (varop, 0);
11242 continue;
11243 }
11244
11245 break;
11246
11247 case MINUS:
11248 /* The following rules apply only to scalars. */
11249 if (shift_mode != shift_unit_mode)
11250 break;
11251 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11252
11253 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11254 with C the size of VAROP - 1 and the shift is logical if
11255 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11256 we have a (gt X 0) operation. If the shift is arithmetic with
11257 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11258 we have a (neg (gt X 0)) operation. */
11259
11260 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11261 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11262 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11263 && (code == LSHIFTRT || code == ASHIFTRT)
11264 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11265 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11266 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11267 {
11268 count = 0;
11269 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11270 const0_rtx);
11271
11272 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11273 varop = gen_rtx_NEG (int_varop_mode, varop);
11274
11275 continue;
11276 }
11277 break;
11278
11279 case TRUNCATE:
11280 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11281 if the truncate does not affect the value. */
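 /* For instance, assuming a DImode to SImode truncation,
    (lshiftrt:SI (truncate:SI (lshiftrt:DI X (const_int 32))) (const_int 3))
    can become (truncate:SI (lshiftrt:DI X (const_int 35))).  */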
11282 if (code == LSHIFTRT
11283 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11284 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11285 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11286 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11287 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11288 {
11289 rtx varop_inner = XEXP (varop, 0);
11290 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11291 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11292 new_count);
11293 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11294 XEXP (varop_inner, 0),
11295 new_count_rtx);
11296 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11297 count = 0;
11298 continue;
11299 }
11300 break;
11301
11302 default:
11303 break;
11304 }
11305
11306 break;
11307 }
11308
11309 shift_mode = result_mode;
11310 if (shift_mode != mode)
11311 {
11312 /* We only change the modes of scalar shifts. */
11313 int_mode = as_a <scalar_int_mode> (mode);
11314 int_result_mode = as_a <scalar_int_mode> (result_mode);
11315 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11316 int_mode, outer_op, outer_const);
11317 }
11318
11319 /* We have now finished analyzing the shift. The result should be
11320 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11321 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11322 to the result of the shift. OUTER_CONST is the relevant constant,
11323 but we must turn off all bits turned off in the shift. */
11324
11325 if (outer_op == UNKNOWN
11326 && orig_code == code && orig_count == count
11327 && varop == orig_varop
11328 && shift_mode == GET_MODE (varop))
11329 return NULL_RTX;
11330
11331 /* Make a SUBREG if necessary. If we can't make it, fail. */
11332 varop = gen_lowpart (shift_mode, varop);
11333 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11334 return NULL_RTX;
11335
11336 /* If we have an outer operation and we just made a shift, it is
11337 possible that we could have simplified the shift were it not
11338 for the outer operation. So try to do the simplification
11339 recursively. */
11340
11341 if (outer_op != UNKNOWN)
11342 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11343 else
11344 x = NULL_RTX;
11345
11346 if (x == NULL_RTX)
11347 x = simplify_gen_binary (code, shift_mode, varop,
11348 gen_int_shift_amount (shift_mode, count));
11349
11350 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11351 turn off all the bits that the shift would have turned off. */
11352 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11353 /* We only change the modes of scalar shifts. */
11354 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11355 x, GET_MODE_MASK (result_mode) >> orig_count);
11356
11357 /* Do the remainder of the processing in RESULT_MODE. */
11358 x = gen_lowpart_or_truncate (result_mode, x);
11359
11360 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11361 operation. */
11362 if (complement_p)
11363 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11364
11365 if (outer_op != UNKNOWN)
11366 {
11367 int_result_mode = as_a <scalar_int_mode> (result_mode);
11368
11369 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11370 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11371 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11372
11373 if (outer_op == AND)
11374 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11375 else if (outer_op == SET)
11376 {
11377 /* This means that we have determined that the result is
11378 equivalent to a constant. This should be rare. */
11379 if (!side_effects_p (x))
11380 x = GEN_INT (outer_const);
11381 }
11382 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11383 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11384 else
11385 x = simplify_gen_binary (outer_op, int_result_mode, x,
11386 GEN_INT (outer_const));
11387 }
11388
11389 return x;
11390 }
11391
11392 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11393 The result of the shift is RESULT_MODE. If we cannot simplify it,
11394 return X or, if it is NULL, synthesize the expression with
11395 simplify_gen_binary. Otherwise, return a simplified value.
11396
11397 The shift is normally computed in the widest mode we find in VAROP, as
11398 long as it isn't a different number of words than RESULT_MODE. Exceptions
11399 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11400
11401 static rtx
11402 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11403 rtx varop, int count)
11404 {
11405 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11406 if (tem)
11407 return tem;
11408
11409 if (!x)
11410 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11411 gen_int_shift_amount (GET_MODE (varop), count));
11412 if (GET_MODE (x) != result_mode)
11413 x = gen_lowpart (result_mode, x);
11414 return x;
11415 }
11416
11417 \f
11418 /* A subroutine of recog_for_combine. See there for arguments and
11419 return value. */
11420
11421 static int
11422 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11423 {
11424 rtx pat = *pnewpat;
11425 rtx pat_without_clobbers;
11426 int insn_code_number;
11427 int num_clobbers_to_add = 0;
11428 int i;
11429 rtx notes = NULL_RTX;
11430 rtx old_notes, old_pat;
11431 int old_icode;
11432
11433 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11434 we use to indicate that something didn't match. If we find such a
11435 thing, force rejection. */
11436 if (GET_CODE (pat) == PARALLEL)
11437 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11438 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11439 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11440 return -1;
11441
11442 old_pat = PATTERN (insn);
11443 old_notes = REG_NOTES (insn);
11444 PATTERN (insn) = pat;
11445 REG_NOTES (insn) = NULL_RTX;
11446
11447 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11448 if (dump_file && (dump_flags & TDF_DETAILS))
11449 {
11450 if (insn_code_number < 0)
11451 fputs ("Failed to match this instruction:\n", dump_file);
11452 else
11453 fputs ("Successfully matched this instruction:\n", dump_file);
11454 print_rtl_single (dump_file, pat);
11455 }
11456
11457 /* If the insn didn't match, it is possible that we previously had an insn
11458 that clobbered some register as a side effect, but the combined
11459 insn doesn't need to do that. So try once more without the clobbers
11460 unless this represents an ASM insn. */
11461
11462 if (insn_code_number < 0 && ! check_asm_operands (pat)
11463 && GET_CODE (pat) == PARALLEL)
11464 {
11465 int pos;
11466
11467 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11468 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11469 {
11470 if (i != pos)
11471 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11472 pos++;
11473 }
11474
11475 SUBST_INT (XVECLEN (pat, 0), pos);
11476
11477 if (pos == 1)
11478 pat = XVECEXP (pat, 0, 0);
11479
11480 PATTERN (insn) = pat;
11481 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11482 if (dump_file && (dump_flags & TDF_DETAILS))
11483 {
11484 if (insn_code_number < 0)
11485 fputs ("Failed to match this instruction:\n", dump_file);
11486 else
11487 fputs ("Successfully matched this instruction:\n", dump_file);
11488 print_rtl_single (dump_file, pat);
11489 }
11490 }
11491
11492 pat_without_clobbers = pat;
11493
11494 PATTERN (insn) = old_pat;
11495 REG_NOTES (insn) = old_notes;
11496
11497   /* Recognize all noop sets; these will be killed by a followup pass.  */
11498 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11499 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11500
11501   /* If we had any clobbers to add, make a new pattern that contains
11502 them. Then check to make sure that all of them are dead. */
11503 if (num_clobbers_to_add)
11504 {
11505 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11506 rtvec_alloc (GET_CODE (pat) == PARALLEL
11507 ? (XVECLEN (pat, 0)
11508 + num_clobbers_to_add)
11509 : num_clobbers_to_add + 1));
11510
11511 if (GET_CODE (pat) == PARALLEL)
11512 for (i = 0; i < XVECLEN (pat, 0); i++)
11513 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11514 else
11515 XVECEXP (newpat, 0, 0) = pat;
11516
11517 add_clobbers (newpat, insn_code_number);
11518
11519 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11520 i < XVECLEN (newpat, 0); i++)
11521 {
11522 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11523 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11524 return -1;
11525 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11526 {
11527 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11528 notes = alloc_reg_note (REG_UNUSED,
11529 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11530 }
11531 }
11532 pat = newpat;
11533 }
11534
11535 if (insn_code_number >= 0
11536 && insn_code_number != NOOP_MOVE_INSN_CODE)
11537 {
11538 old_pat = PATTERN (insn);
11539 old_notes = REG_NOTES (insn);
11540 old_icode = INSN_CODE (insn);
11541 PATTERN (insn) = pat;
11542 REG_NOTES (insn) = notes;
11543 INSN_CODE (insn) = insn_code_number;
11544
11545 /* Allow targets to reject combined insn. */
11546 if (!targetm.legitimate_combined_insn (insn))
11547 {
11548 if (dump_file && (dump_flags & TDF_DETAILS))
11549 fputs ("Instruction not appropriate for target.",
11550 dump_file);
11551
11552 /* Callers expect recog_for_combine to strip
11553 clobbers from the pattern on failure. */
11554 pat = pat_without_clobbers;
11555 notes = NULL_RTX;
11556
11557 insn_code_number = -1;
11558 }
11559
11560 PATTERN (insn) = old_pat;
11561 REG_NOTES (insn) = old_notes;
11562 INSN_CODE (insn) = old_icode;
11563 }
11564
11565 *pnewpat = pat;
11566 *pnotes = notes;
11567
11568 return insn_code_number;
11569 }
11570
11571 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11572 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11573 Return whether anything was so changed. */
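/* For example (illustrative only, little-endian bit numbering assumed):
     (zero_extract:SI (reg:SI 1) (const_int 8) (const_int 0))
   becomes
     (and:SI (reg:SI 1) (const_int 255))
   and a nonzero start position is handled by first shifting the operand
   right with an LSHIFTRT.  */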
11574
11575 static bool
11576 change_zero_ext (rtx pat)
11577 {
11578 bool changed = false;
11579 rtx *src = &SET_SRC (pat);
11580
11581 subrtx_ptr_iterator::array_type array;
11582 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11583 {
11584 rtx x = **iter;
11585 scalar_int_mode mode, inner_mode;
11586 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11587 continue;
11588 int size;
11589
11590 if (GET_CODE (x) == ZERO_EXTRACT
11591 && CONST_INT_P (XEXP (x, 1))
11592 && CONST_INT_P (XEXP (x, 2))
11593 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11594 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11595 {
11596 size = INTVAL (XEXP (x, 1));
11597
11598 int start = INTVAL (XEXP (x, 2));
11599 if (BITS_BIG_ENDIAN)
11600 start = GET_MODE_PRECISION (inner_mode) - size - start;
11601
11602 if (start != 0)
11603 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11604 gen_int_shift_amount (inner_mode, start));
11605 else
11606 x = XEXP (x, 0);
11607
11608 if (mode != inner_mode)
11609 {
11610 if (REG_P (x) && HARD_REGISTER_P (x)
11611 && !can_change_dest_mode (x, 0, mode))
11612 continue;
11613
11614 x = gen_lowpart_SUBREG (mode, x);
11615 }
11616 }
11617 else if (GET_CODE (x) == ZERO_EXTEND
11618 && GET_CODE (XEXP (x, 0)) == SUBREG
11619 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11620 && !paradoxical_subreg_p (XEXP (x, 0))
11621 && subreg_lowpart_p (XEXP (x, 0)))
11622 {
11623 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11624 size = GET_MODE_PRECISION (inner_mode);
11625 x = SUBREG_REG (XEXP (x, 0));
11626 if (GET_MODE (x) != mode)
11627 {
11628 if (REG_P (x) && HARD_REGISTER_P (x)
11629 && !can_change_dest_mode (x, 0, mode))
11630 continue;
11631
11632 x = gen_lowpart_SUBREG (mode, x);
11633 }
11634 }
11635 else if (GET_CODE (x) == ZERO_EXTEND
11636 && REG_P (XEXP (x, 0))
11637 && HARD_REGISTER_P (XEXP (x, 0))
11638 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11639 {
11640 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11641 size = GET_MODE_PRECISION (inner_mode);
11642 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11643 }
11644 else
11645 continue;
11646
11647 if (!(GET_CODE (x) == LSHIFTRT
11648 && CONST_INT_P (XEXP (x, 1))
11649 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11650 {
11651 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11652 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11653 }
11654
11655 SUBST (**iter, x);
11656 changed = true;
11657 }
11658
11659 if (changed)
11660 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11661 maybe_swap_commutative_operands (**iter);
11662
11663 rtx *dst = &SET_DEST (pat);
11664 scalar_int_mode mode;
11665 if (GET_CODE (*dst) == ZERO_EXTRACT
11666 && REG_P (XEXP (*dst, 0))
11667 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11668 && CONST_INT_P (XEXP (*dst, 1))
11669 && CONST_INT_P (XEXP (*dst, 2)))
11670 {
11671 rtx reg = XEXP (*dst, 0);
11672 int width = INTVAL (XEXP (*dst, 1));
11673 int offset = INTVAL (XEXP (*dst, 2));
11674 int reg_width = GET_MODE_PRECISION (mode);
11675 if (BITS_BIG_ENDIAN)
11676 offset = reg_width - width - offset;
11677
11678 rtx x, y, z, w;
11679 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11680 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11681 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11682 if (offset)
11683 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11684 else
11685 y = SET_SRC (pat);
11686 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11687 w = gen_rtx_IOR (mode, x, z);
11688 SUBST (SET_DEST (pat), reg);
11689 SUBST (SET_SRC (pat), w);
11690
11691 changed = true;
11692 }
11693
11694 return changed;
11695 }
11696
11697 /* Like recog, but we receive the address of a pointer to a new pattern.
11698 We try to match the rtx that the pointer points to.
11699 If that fails, we may try to modify or replace the pattern,
11700 storing the replacement into the same pointer object.
11701
11702 Modifications include deletion or addition of CLOBBERs. If the
11703 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11704 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11705 (and undo if that fails).
11706
11707 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11708 the CLOBBERs are placed.
11709
11710 The value is the final insn code from the pattern ultimately matched,
11711 or -1. */
11712
11713 static int
11714 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11715 {
11716 rtx pat = *pnewpat;
11717 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11718 if (insn_code_number >= 0 || check_asm_operands (pat))
11719 return insn_code_number;
11720
11721 void *marker = get_undo_marker ();
11722 bool changed = false;
11723
11724 if (GET_CODE (pat) == SET)
11725 changed = change_zero_ext (pat);
11726 else if (GET_CODE (pat) == PARALLEL)
11727 {
11728 int i;
11729 for (i = 0; i < XVECLEN (pat, 0); i++)
11730 {
11731 rtx set = XVECEXP (pat, 0, i);
11732 if (GET_CODE (set) == SET)
11733 changed |= change_zero_ext (set);
11734 }
11735 }
11736
11737 if (changed)
11738 {
11739 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11740
11741 if (insn_code_number < 0)
11742 undo_to_marker (marker);
11743 }
11744
11745 return insn_code_number;
11746 }
11747 \f
11748 /* Like gen_lowpart_general but for use by combine. In combine it
11749 is not possible to create any new pseudoregs. However, it is
11750 safe to create invalid memory addresses, because combine will
11751 try to recognize them and all they will do is make the combine
11752 attempt fail.
11753
11754 If for some reason this cannot do its job, an rtx
11755 (clobber (const_int 0)) is returned.
11756 An insn containing that will not be recognized. */
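/* E.g. (illustrative only) asking for the QImode lowpart of a pseudo such
   as (reg:SI 100) would normally yield (subreg:QI (reg:SI 100) 0) on a
   little-endian target, while a case this function cannot handle yields
   (clobber (const_int 0)).  */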
11757
11758 static rtx
11759 gen_lowpart_for_combine (machine_mode omode, rtx x)
11760 {
11761 machine_mode imode = GET_MODE (x);
11762 rtx result;
11763
11764 if (omode == imode)
11765 return x;
11766
11767 /* We can only support MODE being wider than a word if X is a
11768 constant integer or has a mode the same size. */
11769 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11770 && ! (CONST_SCALAR_INT_P (x)
11771 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11772 goto fail;
11773
11774 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11775 won't know what to do. So we will strip off the SUBREG here and
11776 process normally. */
11777 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11778 {
11779 x = SUBREG_REG (x);
11780
11781 /* For use in case we fall down into the address adjustments
11782          further below, we need to adjust the known mode of x,
11783          i.e. imode, since we just adjusted x.
11784 imode = GET_MODE (x);
11785
11786 if (imode == omode)
11787 return x;
11788 }
11789
11790 result = gen_lowpart_common (omode, x);
11791
11792 if (result)
11793 return result;
11794
11795 if (MEM_P (x))
11796 {
11797 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11798 address. */
11799 if (MEM_VOLATILE_P (x)
11800 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11801 goto fail;
11802
11803 /* If we want to refer to something bigger than the original memref,
11804 generate a paradoxical subreg instead. That will force a reload
11805 of the original memref X. */
11806 if (paradoxical_subreg_p (omode, imode))
11807 return gen_rtx_SUBREG (omode, x, 0);
11808
11809 poly_int64 offset = byte_lowpart_offset (omode, imode);
11810 return adjust_address_nv (x, omode, offset);
11811 }
11812
11813 /* If X is a comparison operator, rewrite it in a new mode. This
11814 probably won't match, but may allow further simplifications. */
11815 else if (COMPARISON_P (x)
11816 && SCALAR_INT_MODE_P (imode)
11817 && SCALAR_INT_MODE_P (omode))
11818 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11819
11820 /* If we couldn't simplify X any other way, just enclose it in a
11821 SUBREG. Normally, this SUBREG won't match, but some patterns may
11822 include an explicit SUBREG or we may simplify it further in combine. */
11823 else
11824 {
11825 rtx res;
11826
11827 if (imode == VOIDmode)
11828 {
11829 imode = int_mode_for_mode (omode).require ();
11830 x = gen_lowpart_common (imode, x);
11831 if (x == NULL)
11832 goto fail;
11833 }
11834 res = lowpart_subreg (omode, x, imode);
11835 if (res)
11836 return res;
11837 }
11838
11839 fail:
11840 return gen_rtx_CLOBBER (omode, const0_rtx);
11841 }
11842 \f
11843 /* Try to simplify a comparison between OP0 and a constant OP1,
11844 where CODE is the comparison code that will be tested, into a
11845 (CODE OP0 const0_rtx) form.
11846
11847 The result is a possibly different comparison code to use.
11848 *POP1 may be updated. */
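/* For instance (illustrative only), an LTU test of OP0 against
   (const_int 1) is canonicalized here to an EQ test against zero:
   LTU 1 first becomes LEU 0, and an unsigned <= 0 is the same as == 0.  */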
11849
11850 static enum rtx_code
11851 simplify_compare_const (enum rtx_code code, machine_mode mode,
11852 rtx op0, rtx *pop1)
11853 {
11854 scalar_int_mode int_mode;
11855 HOST_WIDE_INT const_op = INTVAL (*pop1);
11856
11857 /* Get the constant we are comparing against and turn off all bits
11858 not on in our mode. */
11859 if (mode != VOIDmode)
11860 const_op = trunc_int_for_mode (const_op, mode);
11861
11862 /* If we are comparing against a constant power of two and the value
11863 being compared can only have that single bit nonzero (e.g., it was
11864 `and'ed with that bit), we can replace this with a comparison
11865 with zero. */
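      /* E.g. (illustrative only): if only bit 2 of OP0 can be nonzero, say
         because OP0 is (and X (const_int 4)), then (eq OP0 (const_int 4))
         becomes (ne OP0 (const_int 0)).  */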
11866 if (const_op
11867 && (code == EQ || code == NE || code == GE || code == GEU
11868 || code == LT || code == LTU)
11869 && is_a <scalar_int_mode> (mode, &int_mode)
11870 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11871 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11872 && (nonzero_bits (op0, int_mode)
11873 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11874 {
11875 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11876 const_op = 0;
11877 }
11878
11879 /* Similarly, if we are comparing a value known to be either -1 or
11880 0 with -1, change it to the opposite comparison against zero. */
11881 if (const_op == -1
11882 && (code == EQ || code == NE || code == GT || code == LE
11883 || code == GEU || code == LTU)
11884 && is_a <scalar_int_mode> (mode, &int_mode)
11885 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11886 {
11887 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11888 const_op = 0;
11889 }
11890
11891 /* Do some canonicalizations based on the comparison code. We prefer
11892 comparisons against zero and then prefer equality comparisons.
11893 If we can reduce the size of a constant, we will do that too. */
11894 switch (code)
11895 {
11896 case LT:
11897 /* < C is equivalent to <= (C - 1) */
11898 if (const_op > 0)
11899 {
11900 const_op -= 1;
11901 code = LE;
11902 /* ... fall through to LE case below. */
11903 gcc_fallthrough ();
11904 }
11905 else
11906 break;
11907
11908 case LE:
11909 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11910 if (const_op < 0)
11911 {
11912 const_op += 1;
11913 code = LT;
11914 }
11915
11916 /* If we are doing a <= 0 comparison on a value known to have
11917 a zero sign bit, we can replace this with == 0. */
11918 else if (const_op == 0
11919 && is_a <scalar_int_mode> (mode, &int_mode)
11920 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11921 && (nonzero_bits (op0, int_mode)
11922 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11923 == 0)
11924 code = EQ;
11925 break;
11926
11927 case GE:
11928 /* >= C is equivalent to > (C - 1). */
11929 if (const_op > 0)
11930 {
11931 const_op -= 1;
11932 code = GT;
11933 /* ... fall through to GT below. */
11934 gcc_fallthrough ();
11935 }
11936 else
11937 break;
11938
11939 case GT:
11940 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11941 if (const_op < 0)
11942 {
11943 const_op += 1;
11944 code = GE;
11945 }
11946
11947 /* If we are doing a > 0 comparison on a value known to have
11948 a zero sign bit, we can replace this with != 0. */
11949 else if (const_op == 0
11950 && is_a <scalar_int_mode> (mode, &int_mode)
11951 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11952 && (nonzero_bits (op0, int_mode)
11953 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11954 == 0)
11955 code = NE;
11956 break;
11957
11958 case LTU:
11959 /* < C is equivalent to <= (C - 1). */
11960 if (const_op > 0)
11961 {
11962 const_op -= 1;
11963 code = LEU;
11964 /* ... fall through ... */
11965 gcc_fallthrough ();
11966 }
11967 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11968 else if (is_a <scalar_int_mode> (mode, &int_mode)
11969 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11970 && ((unsigned HOST_WIDE_INT) const_op
11971 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11972 {
11973 const_op = 0;
11974 code = GE;
11975 break;
11976 }
11977 else
11978 break;
11979
11980 case LEU:
11981 /* unsigned <= 0 is equivalent to == 0 */
11982 if (const_op == 0)
11983 code = EQ;
11984 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11985 else if (is_a <scalar_int_mode> (mode, &int_mode)
11986 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11987 && ((unsigned HOST_WIDE_INT) const_op
11988 == ((HOST_WIDE_INT_1U
11989 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11990 {
11991 const_op = 0;
11992 code = GE;
11993 }
11994 break;
11995
11996 case GEU:
11997 /* >= C is equivalent to > (C - 1). */
11998 if (const_op > 1)
11999 {
12000 const_op -= 1;
12001 code = GTU;
12002 /* ... fall through ... */
12003 gcc_fallthrough ();
12004 }
12005
12006 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
12007 else if (is_a <scalar_int_mode> (mode, &int_mode)
12008 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12009 && ((unsigned HOST_WIDE_INT) const_op
12010 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
12011 {
12012 const_op = 0;
12013 code = LT;
12014 break;
12015 }
12016 else
12017 break;
12018
12019 case GTU:
12020 /* unsigned > 0 is equivalent to != 0 */
12021 if (const_op == 0)
12022 code = NE;
12023 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
12024 else if (is_a <scalar_int_mode> (mode, &int_mode)
12025 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12026 && ((unsigned HOST_WIDE_INT) const_op
12027 == (HOST_WIDE_INT_1U
12028 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12029 {
12030 const_op = 0;
12031 code = LT;
12032 }
12033 break;
12034
12035 default:
12036 break;
12037 }
12038
12039 *pop1 = GEN_INT (const_op);
12040 return code;
12041 }
12042 \f
12043 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
12044 comparison code that will be tested.
12045
12046 The result is a possibly different comparison code to use. *POP0 and
12047 *POP1 may be updated.
12048
12049 It is possible that we might detect that a comparison is either always
12050 true or always false. However, we do not perform general constant
12051 folding in combine, so this knowledge isn't useful. Such tautologies
12052 should have been detected earlier. Hence we ignore all such cases. */
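/* A typical (purely illustrative) outcome: called with code GTU and
   *POP1 == const0_rtx, this returns NE with *POP1 still zero, since an
   unsigned > 0 test is the same as a != 0 test.  */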
12053
12054 static enum rtx_code
12055 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
12056 {
12057 rtx op0 = *pop0;
12058 rtx op1 = *pop1;
12059 rtx tem, tem1;
12060 int i;
12061 scalar_int_mode mode, inner_mode, tmode;
12062 opt_scalar_int_mode tmode_iter;
12063
12064 /* Try a few ways of applying the same transformation to both operands. */
12065 while (1)
12066 {
12067       /* The test below this one won't handle SIGN_EXTENDs on machines without
12068          WORD_REGISTER_OPERATIONS, so check for them specially.  */
12069 if (!WORD_REGISTER_OPERATIONS
12070 && code != GTU && code != GEU && code != LTU && code != LEU
12071 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
12072 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12073 && GET_CODE (XEXP (op1, 0)) == ASHIFT
12074 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12075 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12076 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12077 && (is_a <scalar_int_mode>
12078 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12079 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12080 && CONST_INT_P (XEXP (op0, 1))
12081 && XEXP (op0, 1) == XEXP (op1, 1)
12082 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12083 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12084 && (INTVAL (XEXP (op0, 1))
12085 == (GET_MODE_PRECISION (mode)
12086 - GET_MODE_PRECISION (inner_mode))))
12087 {
12088 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12089 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12090 }
12091
12092 /* If both operands are the same constant shift, see if we can ignore the
12093 shift. We can if the shift is a rotate or if the bits shifted out of
12094 this shift are known to be zero for both inputs and if the type of
12095 comparison is compatible with the shift. */
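      /* For example (illustrative only), an EQ comparison of
         (lshiftrt:SI (reg:SI 1) (const_int 2)) with
         (lshiftrt:SI (reg:SI 2) (const_int 2)) reduces to comparing
         (reg:SI 1) with (reg:SI 2), provided the low two bits of both
         registers are known to be zero.  */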
12096 if (GET_CODE (op0) == GET_CODE (op1)
12097 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12098 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12099 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12100 && (code != GT && code != LT && code != GE && code != LE))
12101 || (GET_CODE (op0) == ASHIFTRT
12102 && (code != GTU && code != LTU
12103 && code != GEU && code != LEU)))
12104 && CONST_INT_P (XEXP (op0, 1))
12105 && INTVAL (XEXP (op0, 1)) >= 0
12106 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12107 && XEXP (op0, 1) == XEXP (op1, 1))
12108 {
12109 machine_mode mode = GET_MODE (op0);
12110 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12111 int shift_count = INTVAL (XEXP (op0, 1));
12112
12113 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12114 mask &= (mask >> shift_count) << shift_count;
12115 else if (GET_CODE (op0) == ASHIFT)
12116 mask = (mask & (mask << shift_count)) >> shift_count;
12117
12118 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12119 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12120 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12121 else
12122 break;
12123 }
12124
12125 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12126 SUBREGs are of the same mode, and, in both cases, the AND would
12127 be redundant if the comparison was done in the narrower mode,
12128 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12129 and the operand's possibly nonzero bits are 0xffffff01; in that case
12130 if we only care about QImode, we don't need the AND). This case
12131 occurs if the output mode of an scc insn is not SImode and
12132 STORE_FLAG_VALUE == 1 (e.g., the 386).
12133
12134 Similarly, check for a case where the AND's are ZERO_EXTEND
12135 operations from some narrower mode even though a SUBREG is not
12136 present. */
12137
12138 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12139 && CONST_INT_P (XEXP (op0, 1))
12140 && CONST_INT_P (XEXP (op1, 1)))
12141 {
12142 rtx inner_op0 = XEXP (op0, 0);
12143 rtx inner_op1 = XEXP (op1, 0);
12144 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12145 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12146 int changed = 0;
12147
12148 if (paradoxical_subreg_p (inner_op0)
12149 && GET_CODE (inner_op1) == SUBREG
12150 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12151 && (GET_MODE (SUBREG_REG (inner_op0))
12152 == GET_MODE (SUBREG_REG (inner_op1)))
12153 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12154 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12155 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12156 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12157 {
12158 op0 = SUBREG_REG (inner_op0);
12159 op1 = SUBREG_REG (inner_op1);
12160
12161 /* The resulting comparison is always unsigned since we masked
12162 off the original sign bit. */
12163 code = unsigned_condition (code);
12164
12165 changed = 1;
12166 }
12167
12168 else if (c0 == c1)
12169 FOR_EACH_MODE_UNTIL (tmode,
12170 as_a <scalar_int_mode> (GET_MODE (op0)))
12171 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12172 {
12173 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12174 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12175 code = unsigned_condition (code);
12176 changed = 1;
12177 break;
12178 }
12179
12180 if (! changed)
12181 break;
12182 }
12183
12184 /* If both operands are NOT, we can strip off the outer operation
12185 and adjust the comparison code for swapped operands; similarly for
12186 NEG, except that this must be an equality comparison. */
12187 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12188 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12189 && (code == EQ || code == NE)))
12190 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12191
12192 else
12193 break;
12194 }
12195
12196 /* If the first operand is a constant, swap the operands and adjust the
12197 comparison code appropriately, but don't do this if the second operand
12198 is already a constant integer. */
12199 if (swap_commutative_operands_p (op0, op1))
12200 {
12201 std::swap (op0, op1);
12202 code = swap_condition (code);
12203 }
12204
12205 /* We now enter a loop during which we will try to simplify the comparison.
12206      For the most part, we are only concerned with comparisons with zero,
12207 but some things may really be comparisons with zero but not start
12208 out looking that way. */
12209
12210 while (CONST_INT_P (op1))
12211 {
12212 machine_mode raw_mode = GET_MODE (op0);
12213 scalar_int_mode int_mode;
12214 int equality_comparison_p;
12215 int sign_bit_comparison_p;
12216 int unsigned_comparison_p;
12217 HOST_WIDE_INT const_op;
12218
12219 /* We only want to handle integral modes. This catches VOIDmode,
12220 CCmode, and the floating-point modes. An exception is that we
12221 can handle VOIDmode if OP0 is a COMPARE or a comparison
12222 operation. */
12223
12224 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12225 && ! (raw_mode == VOIDmode
12226 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12227 break;
12228
12229 /* Try to simplify the compare to constant, possibly changing the
12230 comparison op, and/or changing op1 to zero. */
12231 code = simplify_compare_const (code, raw_mode, op0, &op1);
12232 const_op = INTVAL (op1);
12233
12234 /* Compute some predicates to simplify code below. */
12235
12236 equality_comparison_p = (code == EQ || code == NE);
12237 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12238 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12239 || code == GEU);
12240
12241 /* If this is a sign bit comparison and we can do arithmetic in
12242 MODE, say that we will only be needing the sign bit of OP0. */
12243 if (sign_bit_comparison_p
12244 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12245 && HWI_COMPUTABLE_MODE_P (int_mode))
12246 op0 = force_to_mode (op0, int_mode,
12247 HOST_WIDE_INT_1U
12248 << (GET_MODE_PRECISION (int_mode) - 1),
12249 0);
12250
12251 if (COMPARISON_P (op0))
12252 {
12253 /* We can't do anything if OP0 is a condition code value, rather
12254 than an actual data value. */
12255 if (const_op != 0
12256 || CC0_P (XEXP (op0, 0))
12257 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12258 break;
12259
12260 /* Get the two operands being compared. */
12261 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12262 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12263 else
12264 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12265
12266 /* Check for the cases where we simply want the result of the
12267 earlier test or the opposite of that result. */
12268 if (code == NE || code == EQ
12269 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12270 && (code == LT || code == GE)))
12271 {
12272 enum rtx_code new_code;
12273 if (code == LT || code == NE)
12274 new_code = GET_CODE (op0);
12275 else
12276 new_code = reversed_comparison_code (op0, NULL);
12277
12278 if (new_code != UNKNOWN)
12279 {
12280 code = new_code;
12281 op0 = tem;
12282 op1 = tem1;
12283 continue;
12284 }
12285 }
12286 break;
12287 }
12288
12289 if (raw_mode == VOIDmode)
12290 break;
12291 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12292
12293 /* Now try cases based on the opcode of OP0. If none of the cases
12294 does a "continue", we exit this loop immediately after the
12295 switch. */
12296
12297 unsigned int mode_width = GET_MODE_PRECISION (mode);
12298 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12299 switch (GET_CODE (op0))
12300 {
12301 case ZERO_EXTRACT:
12302 /* If we are extracting a single bit from a variable position in
12303 a constant that has only a single bit set and are comparing it
12304 with zero, we can convert this into an equality comparison
12305 between the position and the location of the single bit. */
12306 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12307 have already reduced the shift count modulo the word size. */
12308 if (!SHIFT_COUNT_TRUNCATED
12309 && CONST_INT_P (XEXP (op0, 0))
12310 && XEXP (op0, 1) == const1_rtx
12311 && equality_comparison_p && const_op == 0
12312 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12313 {
12314 if (BITS_BIG_ENDIAN)
12315 i = BITS_PER_WORD - 1 - i;
12316
12317 op0 = XEXP (op0, 2);
12318 op1 = GEN_INT (i);
12319 const_op = i;
12320
12321 /* Result is nonzero iff shift count is equal to I. */
12322 code = reverse_condition (code);
12323 continue;
12324 }
12325
12326 /* fall through */
12327
12328 case SIGN_EXTRACT:
12329 tem = expand_compound_operation (op0);
12330 if (tem != op0)
12331 {
12332 op0 = tem;
12333 continue;
12334 }
12335 break;
12336
12337 case NOT:
12338 /* If testing for equality, we can take the NOT of the constant. */
12339 if (equality_comparison_p
12340 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12341 {
12342 op0 = XEXP (op0, 0);
12343 op1 = tem;
12344 continue;
12345 }
12346
12347 /* If just looking at the sign bit, reverse the sense of the
12348 comparison. */
12349 if (sign_bit_comparison_p)
12350 {
12351 op0 = XEXP (op0, 0);
12352 code = (code == GE ? LT : GE);
12353 continue;
12354 }
12355 break;
12356
12357 case NEG:
12358 /* If testing for equality, we can take the NEG of the constant. */
12359 if (equality_comparison_p
12360 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12361 {
12362 op0 = XEXP (op0, 0);
12363 op1 = tem;
12364 continue;
12365 }
12366
12367 /* The remaining cases only apply to comparisons with zero. */
12368 if (const_op != 0)
12369 break;
12370
12371 /* When X is ABS or is known positive,
12372 (neg X) is < 0 if and only if X != 0. */
12373
12374 if (sign_bit_comparison_p
12375 && (GET_CODE (XEXP (op0, 0)) == ABS
12376 || (mode_width <= HOST_BITS_PER_WIDE_INT
12377 && (nonzero_bits (XEXP (op0, 0), mode)
12378 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12379 == 0)))
12380 {
12381 op0 = XEXP (op0, 0);
12382 code = (code == LT ? NE : EQ);
12383 continue;
12384 }
12385
12386 /* If we have NEG of something whose two high-order bits are the
12387 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12388 if (num_sign_bit_copies (op0, mode) >= 2)
12389 {
12390 op0 = XEXP (op0, 0);
12391 code = swap_condition (code);
12392 continue;
12393 }
12394 break;
12395
12396 case ROTATE:
12397 /* If we are testing equality and our count is a constant, we
12398 can perform the inverse operation on our RHS. */
12399 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12400 && (tem = simplify_binary_operation (ROTATERT, mode,
12401 op1, XEXP (op0, 1))) != 0)
12402 {
12403 op0 = XEXP (op0, 0);
12404 op1 = tem;
12405 continue;
12406 }
12407
12408 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12409             a particular bit.  Convert it to an AND with a constant that has
12410             just that bit set.  This will be converted into a ZERO_EXTRACT.  */
12411 if (const_op == 0 && sign_bit_comparison_p
12412 && CONST_INT_P (XEXP (op0, 1))
12413 && mode_width <= HOST_BITS_PER_WIDE_INT
12414 && UINTVAL (XEXP (op0, 1)) < mode_width)
12415 {
12416 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12417 (HOST_WIDE_INT_1U
12418 << (mode_width - 1
12419 - INTVAL (XEXP (op0, 1)))));
12420 code = (code == LT ? NE : EQ);
12421 continue;
12422 }
12423
12424 /* Fall through. */
12425
12426 case ABS:
12427 /* ABS is ignorable inside an equality comparison with zero. */
12428 if (const_op == 0 && equality_comparison_p)
12429 {
12430 op0 = XEXP (op0, 0);
12431 continue;
12432 }
12433 break;
12434
12435 case SIGN_EXTEND:
12436 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12437 (compare FOO CONST) if CONST fits in FOO's mode and we
12438 are either testing inequality or have an unsigned
12439 comparison with ZERO_EXTEND or a signed comparison with
12440 SIGN_EXTEND. But don't do it if we don't have a compare
12441 insn of the given mode, since we'd have to revert it
12442 later on, and then we wouldn't know whether to sign- or
12443 zero-extend. */
12444 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12445 && ! unsigned_comparison_p
12446 && HWI_COMPUTABLE_MODE_P (mode)
12447 && trunc_int_for_mode (const_op, mode) == const_op
12448 && have_insn_for (COMPARE, mode))
12449 {
12450 op0 = XEXP (op0, 0);
12451 continue;
12452 }
12453 break;
12454
12455 case SUBREG:
12456 /* Check for the case where we are comparing A - C1 with C2, that is
12457
12458 (subreg:MODE (plus (A) (-C1))) op (C2)
12459
12460 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12461 comparison in the wider mode. One of the following two conditions
12462 must be true in order for this to be valid:
12463
12464 1. The mode extension results in the same bit pattern being added
12465 on both sides and the comparison is equality or unsigned. As
12466 C2 has been truncated to fit in MODE, the pattern can only be
12467 all 0s or all 1s.
12468
12469 2. The mode extension results in the sign bit being copied on
12470 each side.
12471
12472 The difficulty here is that we have predicates for A but not for
12473 (A - C1) so we need to check that C1 is within proper bounds so
12474          as to perturb A as little as possible.  */
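         /* For instance (illustrative only; assumes a little-endian lowpart
            and that (reg:SI 1) is known to have only its low 8 bits nonzero):
              (eq (subreg:QI (plus:SI (reg:SI 1) (const_int -1)) 0) (const_int 5))
            can be lifted to
              (eq (plus:SI (reg:SI 1) (const_int -1)) (const_int 5)).  */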
12475
12476 if (mode_width <= HOST_BITS_PER_WIDE_INT
12477 && subreg_lowpart_p (op0)
12478 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12479 &inner_mode)
12480 && GET_MODE_PRECISION (inner_mode) > mode_width
12481 && GET_CODE (SUBREG_REG (op0)) == PLUS
12482 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12483 {
12484 rtx a = XEXP (SUBREG_REG (op0), 0);
12485 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12486
12487 if ((c1 > 0
12488 && (unsigned HOST_WIDE_INT) c1
12489 < HOST_WIDE_INT_1U << (mode_width - 1)
12490 && (equality_comparison_p || unsigned_comparison_p)
12491 /* (A - C1) zero-extends if it is positive and sign-extends
12492 if it is negative, C2 both zero- and sign-extends. */
12493 && (((nonzero_bits (a, inner_mode)
12494 & ~GET_MODE_MASK (mode)) == 0
12495 && const_op >= 0)
12496 /* (A - C1) sign-extends if it is positive and 1-extends
12497 if it is negative, C2 both sign- and 1-extends. */
12498 || (num_sign_bit_copies (a, inner_mode)
12499 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12500 - mode_width)
12501 && const_op < 0)))
12502 || ((unsigned HOST_WIDE_INT) c1
12503 < HOST_WIDE_INT_1U << (mode_width - 2)
12504 /* (A - C1) always sign-extends, like C2. */
12505 && num_sign_bit_copies (a, inner_mode)
12506 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12507 - (mode_width - 1))))
12508 {
12509 op0 = SUBREG_REG (op0);
12510 continue;
12511 }
12512 }
12513
12514 /* If the inner mode is narrower and we are extracting the low part,
12515 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12516 if (paradoxical_subreg_p (op0))
12517 ;
12518 else if (subreg_lowpart_p (op0)
12519 && GET_MODE_CLASS (mode) == MODE_INT
12520 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12521 && (code == NE || code == EQ)
12522 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12523 && !paradoxical_subreg_p (op0)
12524 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12525 & ~GET_MODE_MASK (mode)) == 0)
12526 {
12527 /* Remove outer subregs that don't do anything. */
12528 tem = gen_lowpart (inner_mode, op1);
12529
12530 if ((nonzero_bits (tem, inner_mode)
12531 & ~GET_MODE_MASK (mode)) == 0)
12532 {
12533 op0 = SUBREG_REG (op0);
12534 op1 = tem;
12535 continue;
12536 }
12537 break;
12538 }
12539 else
12540 break;
12541
12542 /* FALLTHROUGH */
12543
12544 case ZERO_EXTEND:
12545 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12546 && (unsigned_comparison_p || equality_comparison_p)
12547 && HWI_COMPUTABLE_MODE_P (mode)
12548 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12549 && const_op >= 0
12550 && have_insn_for (COMPARE, mode))
12551 {
12552 op0 = XEXP (op0, 0);
12553 continue;
12554 }
12555 break;
12556
12557 case PLUS:
12558 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12559 this for equality comparisons due to pathological cases involving
12560 overflows. */
12561 if (equality_comparison_p
12562 && (tem = simplify_binary_operation (MINUS, mode,
12563 op1, XEXP (op0, 1))) != 0)
12564 {
12565 op0 = XEXP (op0, 0);
12566 op1 = tem;
12567 continue;
12568 }
12569
12570 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12571 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12572 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12573 {
12574 op0 = XEXP (XEXP (op0, 0), 0);
12575 code = (code == LT ? EQ : NE);
12576 continue;
12577 }
12578 break;
12579
12580 case MINUS:
12581 /* We used to optimize signed comparisons against zero, but that
12582 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12583 arrive here as equality comparisons, or (GEU, LTU) are
12584 optimized away. No need to special-case them. */
12585
12586 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12587 (eq B (minus A C)), whichever simplifies. We can only do
12588 this for equality comparisons due to pathological cases involving
12589 overflows. */
12590 if (equality_comparison_p
12591 && (tem = simplify_binary_operation (PLUS, mode,
12592 XEXP (op0, 1), op1)) != 0)
12593 {
12594 op0 = XEXP (op0, 0);
12595 op1 = tem;
12596 continue;
12597 }
12598
12599 if (equality_comparison_p
12600 && (tem = simplify_binary_operation (MINUS, mode,
12601 XEXP (op0, 0), op1)) != 0)
12602 {
12603 op0 = XEXP (op0, 1);
12604 op1 = tem;
12605 continue;
12606 }
12607
12608 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12609 of bits in X minus 1, is one iff X > 0. */
12610 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12611 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12612 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12613 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12614 {
12615 op0 = XEXP (op0, 1);
12616 code = (code == GE ? LE : GT);
12617 continue;
12618 }
12619 break;
12620
12621 case XOR:
12622 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12623 if C is zero or B is a constant. */
12624 if (equality_comparison_p
12625 && (tem = simplify_binary_operation (XOR, mode,
12626 XEXP (op0, 1), op1)) != 0)
12627 {
12628 op0 = XEXP (op0, 0);
12629 op1 = tem;
12630 continue;
12631 }
12632 break;
12633
12634
12635 case IOR:
12636 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12637 iff X <= 0. */
12638 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12639 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12640 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12641 {
12642 op0 = XEXP (op0, 1);
12643 code = (code == GE ? GT : LE);
12644 continue;
12645 }
12646 break;
12647
12648 case AND:
12649 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12650 will be converted to a ZERO_EXTRACT later. */
12651 if (const_op == 0 && equality_comparison_p
12652 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12653 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12654 {
12655 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12656 XEXP (XEXP (op0, 0), 1));
12657 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12658 continue;
12659 }
12660
12661 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12662 zero and X is a comparison and C1 and C2 describe only bits set
12663 in STORE_FLAG_VALUE, we can compare with X. */
12664 if (const_op == 0 && equality_comparison_p
12665 && mode_width <= HOST_BITS_PER_WIDE_INT
12666 && CONST_INT_P (XEXP (op0, 1))
12667 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12668 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12669 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12670 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12671 {
12672 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12673 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12674 if ((~STORE_FLAG_VALUE & mask) == 0
12675 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12676 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12677 && COMPARISON_P (tem))))
12678 {
12679 op0 = XEXP (XEXP (op0, 0), 0);
12680 continue;
12681 }
12682 }
12683
12684 /* If we are doing an equality comparison of an AND of a bit equal
12685 to the sign bit, replace this with a LT or GE comparison of
12686 the underlying value. */
12687 if (equality_comparison_p
12688 && const_op == 0
12689 && CONST_INT_P (XEXP (op0, 1))
12690 && mode_width <= HOST_BITS_PER_WIDE_INT
12691 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12692 == HOST_WIDE_INT_1U << (mode_width - 1)))
12693 {
12694 op0 = XEXP (op0, 0);
12695 code = (code == EQ ? GE : LT);
12696 continue;
12697 }
12698
12699 /* If this AND operation is really a ZERO_EXTEND from a narrower
12700 mode, the constant fits within that mode, and this is either an
12701 equality or unsigned comparison, try to do this comparison in
12702 the narrower mode.
12703
12704 Note that in:
12705
12706 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12707 -> (ne:DI (reg:SI 4) (const_int 0))
12708
12709 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12710              known to hold a value of the required mode, the
12711 transformation is invalid. */
12712 if ((equality_comparison_p || unsigned_comparison_p)
12713 && CONST_INT_P (XEXP (op0, 1))
12714 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12715 & GET_MODE_MASK (mode))
12716 + 1)) >= 0
12717 && const_op >> i == 0
12718 && int_mode_for_size (i, 1).exists (&tmode))
12719 {
12720 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12721 continue;
12722 }
12723
12724 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12725 fits in both M1 and M2 and the SUBREG is either paradoxical
12726 or represents the low part, permute the SUBREG and the AND
12727 and try again. */
12728 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12729 && CONST_INT_P (XEXP (op0, 1)))
12730 {
12731 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12732 /* Require an integral mode, to avoid creating something like
12733 (AND:SF ...). */
12734 if ((is_a <scalar_int_mode>
12735 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12736 /* It is unsafe to commute the AND into the SUBREG if the
12737 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12738 not defined. As originally written the upper bits
12739 have a defined value due to the AND operation.
12740 However, if we commute the AND inside the SUBREG then
12741 they no longer have defined values and the meaning of
12742 the code has been changed.
12743 Also C1 should not change value in the smaller mode,
12744 see PR67028 (a positive C1 can become negative in the
12745                  smaller mode, so that the AND no longer masks the
12746 upper bits). */
12747 && ((WORD_REGISTER_OPERATIONS
12748 && mode_width > GET_MODE_PRECISION (tmode)
12749 && mode_width <= BITS_PER_WORD
12750 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12751 || (mode_width <= GET_MODE_PRECISION (tmode)
12752 && subreg_lowpart_p (XEXP (op0, 0))))
12753 && mode_width <= HOST_BITS_PER_WIDE_INT
12754 && HWI_COMPUTABLE_MODE_P (tmode)
12755 && (c1 & ~mask) == 0
12756 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12757 && c1 != mask
12758 && c1 != GET_MODE_MASK (tmode))
12759 {
12760 op0 = simplify_gen_binary (AND, tmode,
12761 SUBREG_REG (XEXP (op0, 0)),
12762 gen_int_mode (c1, tmode));
12763 op0 = gen_lowpart (mode, op0);
12764 continue;
12765 }
12766 }
12767
12768 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12769 if (const_op == 0 && equality_comparison_p
12770 && XEXP (op0, 1) == const1_rtx
12771 && GET_CODE (XEXP (op0, 0)) == NOT)
12772 {
12773 op0 = simplify_and_const_int (NULL_RTX, mode,
12774 XEXP (XEXP (op0, 0), 0), 1);
12775 code = (code == NE ? EQ : NE);
12776 continue;
12777 }
12778
12779 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12780 (eq (and (lshiftrt X) 1) 0).
12781 Also handle the case where (not X) is expressed using xor. */
12782 if (const_op == 0 && equality_comparison_p
12783 && XEXP (op0, 1) == const1_rtx
12784 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12785 {
12786 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12787 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12788
12789 if (GET_CODE (shift_op) == NOT
12790 || (GET_CODE (shift_op) == XOR
12791 && CONST_INT_P (XEXP (shift_op, 1))
12792 && CONST_INT_P (shift_count)
12793 && HWI_COMPUTABLE_MODE_P (mode)
12794 && (UINTVAL (XEXP (shift_op, 1))
12795 == HOST_WIDE_INT_1U
12796 << INTVAL (shift_count))))
12797 {
12798 op0
12799 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12800 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12801 code = (code == NE ? EQ : NE);
12802 continue;
12803 }
12804 }
12805 break;
12806
12807 case ASHIFT:
12808 /* If we have (compare (ashift FOO N) (const_int C)) and
12809 the high order N bits of FOO (N+1 if an inequality comparison)
12810 are known to be zero, we can do this by comparing FOO with C
12811 shifted right N bits so long as the low-order N bits of C are
12812 zero. */
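          /* E.g. (illustrative only): if the high three bits of FOO are known
             to be zero, (compare (ashift FOO (const_int 2)) (const_int 20))
             can become (compare FOO (const_int 5)) even for an inequality
             test, since 20 has its low two bits clear.  */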
12813 if (CONST_INT_P (XEXP (op0, 1))
12814 && INTVAL (XEXP (op0, 1)) >= 0
12815 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12816 < HOST_BITS_PER_WIDE_INT)
12817 && (((unsigned HOST_WIDE_INT) const_op
12818 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12819 - 1)) == 0)
12820 && mode_width <= HOST_BITS_PER_WIDE_INT
12821 && (nonzero_bits (XEXP (op0, 0), mode)
12822 & ~(mask >> (INTVAL (XEXP (op0, 1))
12823 + ! equality_comparison_p))) == 0)
12824 {
12825 /* We must perform a logical shift, not an arithmetic one,
12826 as we want the top N bits of C to be zero. */
12827 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12828
12829 temp >>= INTVAL (XEXP (op0, 1));
12830 op1 = gen_int_mode (temp, mode);
12831 op0 = XEXP (op0, 0);
12832 continue;
12833 }
12834
12835 /* If we are doing a sign bit comparison, it means we are testing
12836 a particular bit. Convert it to the appropriate AND. */
12837 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12838 && mode_width <= HOST_BITS_PER_WIDE_INT)
12839 {
12840 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12841 (HOST_WIDE_INT_1U
12842 << (mode_width - 1
12843 - INTVAL (XEXP (op0, 1)))));
12844 code = (code == LT ? NE : EQ);
12845 continue;
12846 }
12847
12848           /* If this is an equality comparison with zero and we are shifting
12849 the low bit to the sign bit, we can convert this to an AND of the
12850 low-order bit. */
12851 if (const_op == 0 && equality_comparison_p
12852 && CONST_INT_P (XEXP (op0, 1))
12853 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12854 {
12855 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12856 continue;
12857 }
12858 break;
12859
12860 case ASHIFTRT:
12861 /* If this is an equality comparison with zero, we can do this
12862 as a logical shift, which might be much simpler. */
12863 if (equality_comparison_p && const_op == 0
12864 && CONST_INT_P (XEXP (op0, 1)))
12865 {
12866 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12867 XEXP (op0, 0),
12868 INTVAL (XEXP (op0, 1)));
12869 continue;
12870 }
12871
12872 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12873 do the comparison in a narrower mode. */
12874 if (! unsigned_comparison_p
12875 && CONST_INT_P (XEXP (op0, 1))
12876 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12877 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12878 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12879 .exists (&tmode))
12880 && (((unsigned HOST_WIDE_INT) const_op
12881 + (GET_MODE_MASK (tmode) >> 1) + 1)
12882 <= GET_MODE_MASK (tmode)))
12883 {
12884 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12885 continue;
12886 }
12887
12888 /* Likewise if OP0 is a PLUS of a sign extension with a
12889 constant, which is usually represented with the PLUS
12890 between the shifts. */
12891 if (! unsigned_comparison_p
12892 && CONST_INT_P (XEXP (op0, 1))
12893 && GET_CODE (XEXP (op0, 0)) == PLUS
12894 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12895 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12896 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12897 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12898 .exists (&tmode))
12899 && (((unsigned HOST_WIDE_INT) const_op
12900 + (GET_MODE_MASK (tmode) >> 1) + 1)
12901 <= GET_MODE_MASK (tmode)))
12902 {
12903 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12904 rtx add_const = XEXP (XEXP (op0, 0), 1);
12905 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12906 add_const, XEXP (op0, 1));
12907
12908 op0 = simplify_gen_binary (PLUS, tmode,
12909 gen_lowpart (tmode, inner),
12910 new_const);
12911 continue;
12912 }
12913
12914 /* FALLTHROUGH */
12915 case LSHIFTRT:
12916 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12917 the low order N bits of FOO are known to be zero, we can do this
12918 by comparing FOO with C shifted left N bits so long as no
12919 overflow occurs. Even if the low order N bits of FOO aren't known
12920 to be zero, if the comparison is >= or < we can use the same
12921 optimization and for > or <= by setting all the low
12922 order N bits in the comparison constant. */
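          /* For example (illustrative only): when the low two bits of FOO are
             known to be zero, (compare (lshiftrt FOO (const_int 2)) (const_int 5))
             can become an unsigned comparison of FOO with (const_int 20).  */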
12923 if (CONST_INT_P (XEXP (op0, 1))
12924 && INTVAL (XEXP (op0, 1)) > 0
12925 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12926 && mode_width <= HOST_BITS_PER_WIDE_INT
12927 && (((unsigned HOST_WIDE_INT) const_op
12928 + (GET_CODE (op0) != LSHIFTRT
12929 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12930 + 1)
12931 : 0))
12932 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12933 {
12934 unsigned HOST_WIDE_INT low_bits
12935 = (nonzero_bits (XEXP (op0, 0), mode)
12936 & ((HOST_WIDE_INT_1U
12937 << INTVAL (XEXP (op0, 1))) - 1));
12938 if (low_bits == 0 || !equality_comparison_p)
12939 {
12940 /* If the shift was logical, then we must make the condition
12941 unsigned. */
12942 if (GET_CODE (op0) == LSHIFTRT)
12943 code = unsigned_condition (code);
12944
12945 const_op = (unsigned HOST_WIDE_INT) const_op
12946 << INTVAL (XEXP (op0, 1));
12947 if (low_bits != 0
12948 && (code == GT || code == GTU
12949 || code == LE || code == LEU))
12950 const_op
12951 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12952 op1 = GEN_INT (const_op);
12953 op0 = XEXP (op0, 0);
12954 continue;
12955 }
12956 }
12957
12958 /* If we are using this shift to extract just the sign bit, we
12959 can replace this with an LT or GE comparison. */
12960 if (const_op == 0
12961 && (equality_comparison_p || sign_bit_comparison_p)
12962 && CONST_INT_P (XEXP (op0, 1))
12963 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12964 {
12965 op0 = XEXP (op0, 0);
12966 code = (code == NE || code == GT ? LT : GE);
12967 continue;
12968 }
12969 break;
12970
12971 default:
12972 break;
12973 }
12974
12975 break;
12976 }
12977
12978 /* Now make any compound operations involved in this comparison. Then,
12979 check for an outmost SUBREG on OP0 that is not doing anything or is
12980 paradoxical. The latter transformation must only be performed when
12981 it is known that the "extra" bits will be the same in op0 and op1 or
12982 that they don't matter. There are three cases to consider:
12983
12984 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12985 care bits and we can assume they have any convenient value. So
12986 making the transformation is safe.
12987
12988 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12989 In this case the upper bits of op0 are undefined. We should not make
12990 the simplification in that case as we do not know the contents of
12991 those bits.
12992
12993 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12994 In that case we know those bits are zeros or ones. We must also be
12995 sure that they are the same as the upper bits of op1.
12996
12997 We can never remove a SUBREG for a non-equality comparison because
12998 the sign bit is in a different place in the underlying object. */
12999
13000 rtx_code op0_mco_code = SET;
13001 if (op1 == const0_rtx)
13002 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
13003
13004 op0 = make_compound_operation (op0, op0_mco_code);
13005 op1 = make_compound_operation (op1, SET);
13006
13007 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
13008 && is_int_mode (GET_MODE (op0), &mode)
13009 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13010 && (code == NE || code == EQ))
13011 {
13012 if (paradoxical_subreg_p (op0))
13013 {
13014 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
13015 implemented. */
13016 if (REG_P (SUBREG_REG (op0)))
13017 {
13018 op0 = SUBREG_REG (op0);
13019 op1 = gen_lowpart (inner_mode, op1);
13020 }
13021 }
13022 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13023 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
13024 & ~GET_MODE_MASK (mode)) == 0)
13025 {
13026 tem = gen_lowpart (inner_mode, op1);
13027
13028 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
13029 op0 = SUBREG_REG (op0), op1 = tem;
13030 }
13031 }
13032
13033 /* We now do the opposite procedure: Some machines don't have compare
13034 insns in all modes. If OP0's mode is an integer mode smaller than a
13035 word and we can't do a compare in that mode, see if there is a larger
13036 mode for which we can do the compare. There are a number of cases in
13037 which we can use the wider mode. */
13038
13039 if (is_int_mode (GET_MODE (op0), &mode)
13040 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
13041 && ! have_insn_for (COMPARE, mode))
13042 FOR_EACH_WIDER_MODE (tmode_iter, mode)
13043 {
13044 tmode = tmode_iter.require ();
13045 if (!HWI_COMPUTABLE_MODE_P (tmode))
13046 break;
13047 if (have_insn_for (COMPARE, tmode))
13048 {
13049 int zero_extended;
13050
13051 /* If this is a test for negative, we can make an explicit
13052 test of the sign bit. Test this first so we can use
13053 a paradoxical subreg to extend OP0. */
13054
13055 if (op1 == const0_rtx && (code == LT || code == GE)
13056 && HWI_COMPUTABLE_MODE_P (mode))
13057 {
13058 unsigned HOST_WIDE_INT sign
13059 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
13060 op0 = simplify_gen_binary (AND, tmode,
13061 gen_lowpart (tmode, op0),
13062 gen_int_mode (sign, tmode));
13063 code = (code == LT) ? NE : EQ;
13064 break;
13065 }
13066
13067 /* If the only nonzero bits in OP0 and OP1 are those in the
13068 narrower mode and this is an equality or unsigned comparison,
13069 we can use the wider mode. Similarly for sign-extended
13070 values, in which case it is true for all comparisons. */
13071 zero_extended = ((code == EQ || code == NE
13072 || code == GEU || code == GTU
13073 || code == LEU || code == LTU)
13074 && (nonzero_bits (op0, tmode)
13075 & ~GET_MODE_MASK (mode)) == 0
13076 && ((CONST_INT_P (op1)
13077 || (nonzero_bits (op1, tmode)
13078 & ~GET_MODE_MASK (mode)) == 0)));
13079
13080 if (zero_extended
13081 || ((num_sign_bit_copies (op0, tmode)
13082 > (unsigned int) (GET_MODE_PRECISION (tmode)
13083 - GET_MODE_PRECISION (mode)))
13084 && (num_sign_bit_copies (op1, tmode)
13085 > (unsigned int) (GET_MODE_PRECISION (tmode)
13086 - GET_MODE_PRECISION (mode)))))
13087 {
13088 /* If OP0 is an AND and we don't have an AND in MODE either,
13089 make a new AND in the proper mode. */
13090 if (GET_CODE (op0) == AND
13091 && !have_insn_for (AND, mode))
13092 op0 = simplify_gen_binary (AND, tmode,
13093 gen_lowpart (tmode,
13094 XEXP (op0, 0)),
13095 gen_lowpart (tmode,
13096 XEXP (op0, 1)));
13097 else
13098 {
13099 if (zero_extended)
13100 {
13101 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13102 op0, mode);
13103 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13104 op1, mode);
13105 }
13106 else
13107 {
13108 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13109 op0, mode);
13110 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13111 op1, mode);
13112 }
13113 break;
13114 }
13115 }
13116 }
13117 }
13118
13119 /* We may have changed the comparison operands. Re-canonicalize. */
13120 if (swap_commutative_operands_p (op0, op1))
13121 {
13122 std::swap (op0, op1);
13123 code = swap_condition (code);
13124 }
13125
13126 /* If this machine only supports a subset of valid comparisons, see if we
13127 can convert an unsupported one into a supported one. */
13128 target_canonicalize_comparison (&code, &op0, &op1, 0);
13129
13130 *pop0 = op0;
13131 *pop1 = op1;
13132
13133 return code;
13134 }
13135 \f
13136 /* Utility function for record_value_for_reg. Count number of
13137 rtxs in X. */
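/* (Sketch of the shared-operand shortcut below: for (plus X X) the result
   is 1 + 2 * count_rtxs (X), i.e. the shared operand is counted twice but
   walked only once.)  */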
13138 static int
13139 count_rtxs (rtx x)
13140 {
13141 enum rtx_code code = GET_CODE (x);
13142 const char *fmt;
13143 int i, j, ret = 1;
13144
13145 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13146 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13147 {
13148 rtx x0 = XEXP (x, 0);
13149 rtx x1 = XEXP (x, 1);
13150
13151 if (x0 == x1)
13152 return 1 + 2 * count_rtxs (x0);
13153
13154 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13155 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13156 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13157 return 2 + 2 * count_rtxs (x0)
13158 	       + count_rtxs (x0 == XEXP (x1, 0)
13159 ? XEXP (x1, 1) : XEXP (x1, 0));
13160
13161 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13162 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13163 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13164 return 2 + 2 * count_rtxs (x1)
13165 	       + count_rtxs (x1 == XEXP (x0, 0)
13166 ? XEXP (x0, 1) : XEXP (x0, 0));
13167 }
13168
13169 fmt = GET_RTX_FORMAT (code);
13170 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13171 if (fmt[i] == 'e')
13172 ret += count_rtxs (XEXP (x, i));
13173 else if (fmt[i] == 'E')
13174 for (j = 0; j < XVECLEN (x, i); j++)
13175 ret += count_rtxs (XVECEXP (x, i, j));
13176
13177 return ret;
13178 }
13179 \f
13180 /* Utility function for following routine. Called when X is part of a value
13181 being stored into last_set_value. Sets last_set_table_tick
13182 for each register mentioned. Similar to mention_regs in cse.c */
13183
13184 static void
13185 update_table_tick (rtx x)
13186 {
13187 enum rtx_code code = GET_CODE (x);
13188 const char *fmt = GET_RTX_FORMAT (code);
13189 int i, j;
13190
13191 if (code == REG)
13192 {
13193 unsigned int regno = REGNO (x);
13194 unsigned int endregno = END_REGNO (x);
13195 unsigned int r;
13196
13197 for (r = regno; r < endregno; r++)
13198 {
13199 reg_stat_type *rsp = &reg_stat[r];
13200 rsp->last_set_table_tick = label_tick;
13201 }
13202
13203 return;
13204 }
13205
13206 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13207 if (fmt[i] == 'e')
13208 {
13209 /* Check for identical subexpressions. If x contains
13210 	   identical subexpressions we only have to traverse one of
13211 them. */
13212 if (i == 0 && ARITHMETIC_P (x))
13213 {
13214 /* Note that at this point x1 has already been
13215 processed. */
13216 rtx x0 = XEXP (x, 0);
13217 rtx x1 = XEXP (x, 1);
13218
13219 /* If x0 and x1 are identical then there is no need to
13220 process x0. */
13221 if (x0 == x1)
13222 break;
13223
13224 /* If x0 is identical to a subexpression of x1 then while
13225 processing x1, x0 has already been processed. Thus we
13226 are done with x. */
13227 if (ARITHMETIC_P (x1)
13228 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13229 break;
13230
13231 /* If x1 is identical to a subexpression of x0 then we
13232 still have to process the rest of x0. */
13233 if (ARITHMETIC_P (x0)
13234 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13235 {
13236 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13237 break;
13238 }
13239 }
13240
13241 update_table_tick (XEXP (x, i));
13242 }
13243 else if (fmt[i] == 'E')
13244 for (j = 0; j < XVECLEN (x, i); j++)
13245 update_table_tick (XVECEXP (x, i, j));
13246 }
13247
13248 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13249 are saying that the register is clobbered and we no longer know its
13250 value. If INSN is zero, don't update reg_stat[].last_set; this is
13251 only permitted with VALUE also zero and is used to invalidate the
13252 register. */
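/* A rough sketch of the self-reference handling below: for an insn like
   (set (reg A) (plus (reg A) (const_int 1))), the value recorded for A
   becomes (plus <last value of A> (const_int 1)) when the previous value
   of A is known, and degrades to (clobber (const_int 0)) when it is not.  */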
13253
13254 static void
13255 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13256 {
13257 unsigned int regno = REGNO (reg);
13258 unsigned int endregno = END_REGNO (reg);
13259 unsigned int i;
13260 reg_stat_type *rsp;
13261
13262 /* If VALUE contains REG and we have a previous value for REG, substitute
13263 the previous value. */
13264 if (value && insn && reg_overlap_mentioned_p (reg, value))
13265 {
13266 rtx tem;
13267
13268 /* Set things up so get_last_value is allowed to see anything set up to
13269 our insn. */
13270 subst_low_luid = DF_INSN_LUID (insn);
13271 tem = get_last_value (reg);
13272
13273 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13274 it isn't going to be useful and will take a lot of time to process,
13275 so just use the CLOBBER. */
13276
13277 if (tem)
13278 {
13279 if (ARITHMETIC_P (tem)
13280 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13281 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13282 tem = XEXP (tem, 0);
13283 else if (count_occurrences (value, reg, 1) >= 2)
13284 {
13285 /* If there are two or more occurrences of REG in VALUE,
13286 prevent the value from growing too much. */
13287 if (count_rtxs (tem) > param_max_last_value_rtl)
13288 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13289 }
13290
13291 value = replace_rtx (copy_rtx (value), reg, tem);
13292 }
13293 }
13294
13295 /* For each register modified, show we don't know its value, that
13296 we don't know about its bitwise content, that its value has been
13297 updated, and that we don't know the location of the death of the
13298 register. */
13299 for (i = regno; i < endregno; i++)
13300 {
13301 rsp = &reg_stat[i];
13302
13303 if (insn)
13304 rsp->last_set = insn;
13305
13306 rsp->last_set_value = 0;
13307 rsp->last_set_mode = VOIDmode;
13308 rsp->last_set_nonzero_bits = 0;
13309 rsp->last_set_sign_bit_copies = 0;
13310 rsp->last_death = 0;
13311 rsp->truncated_to_mode = VOIDmode;
13312 }
13313
13314 /* Mark registers that are being referenced in this value. */
13315 if (value)
13316 update_table_tick (value);
13317
13318 /* Now update the status of each register being set.
13319 If someone is using this register in this block, set this register
13320 to invalid since we will get confused between the two lives in this
13321 basic block. This makes using this register always invalid. In cse, we
13322 scan the table to invalidate all entries using this register, but this
13323 is too much work for us. */
13324
13325 for (i = regno; i < endregno; i++)
13326 {
13327 rsp = &reg_stat[i];
13328 rsp->last_set_label = label_tick;
13329 if (!insn
13330 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13331 rsp->last_set_invalid = 1;
13332 else
13333 rsp->last_set_invalid = 0;
13334 }
13335
13336 /* The value being assigned might refer to X (like in "x++;"). In that
13337 case, we must replace it with (clobber (const_int 0)) to prevent
13338 infinite loops. */
13339 rsp = &reg_stat[regno];
13340 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13341 {
13342 value = copy_rtx (value);
13343 if (!get_last_value_validate (&value, insn, label_tick, 1))
13344 value = 0;
13345 }
13346
13347 /* For the main register being modified, update the value, the mode, the
13348 nonzero bits, and the number of sign bit copies. */
13349
13350 rsp->last_set_value = value;
13351
13352 if (value)
13353 {
13354 machine_mode mode = GET_MODE (reg);
13355 subst_low_luid = DF_INSN_LUID (insn);
13356 rsp->last_set_mode = mode;
13357 if (GET_MODE_CLASS (mode) == MODE_INT
13358 && HWI_COMPUTABLE_MODE_P (mode))
13359 mode = nonzero_bits_mode;
13360 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13361 rsp->last_set_sign_bit_copies
13362 = num_sign_bit_copies (value, GET_MODE (reg));
13363 }
13364 }
13365
13366 /* Called via note_stores from record_dead_and_set_regs to handle one
13367 SET or CLOBBER in an insn. DATA is the instruction in which the
13368 set is occurring. */
13369
13370 static void
13371 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13372 {
13373 rtx_insn *record_dead_insn = (rtx_insn *) data;
13374
13375 if (GET_CODE (dest) == SUBREG)
13376 dest = SUBREG_REG (dest);
13377
13378 if (!record_dead_insn)
13379 {
13380 if (REG_P (dest))
13381 record_value_for_reg (dest, NULL, NULL_RTX);
13382 return;
13383 }
13384
13385 if (REG_P (dest))
13386 {
13387 /* If we are setting the whole register, we know its value. Otherwise
13388 show that we don't know the value. We can handle a SUBREG if it's
13389 the low part, but we must be careful with paradoxical SUBREGs on
13390 RISC architectures because we cannot strip e.g. an extension around
13391 a load and record the naked load since the RTL middle-end considers
13392 that the upper bits are defined according to LOAD_EXTEND_OP. */
13393 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13394 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13395 else if (GET_CODE (setter) == SET
13396 && GET_CODE (SET_DEST (setter)) == SUBREG
13397 && SUBREG_REG (SET_DEST (setter)) == dest
13398 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13399 BITS_PER_WORD)
13400 && subreg_lowpart_p (SET_DEST (setter)))
13401 record_value_for_reg (dest, record_dead_insn,
13402 WORD_REGISTER_OPERATIONS
13403 && word_register_operation_p (SET_SRC (setter))
13404 && paradoxical_subreg_p (SET_DEST (setter))
13405 ? SET_SRC (setter)
13406 : gen_lowpart (GET_MODE (dest),
13407 SET_SRC (setter)));
13408 else
13409 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13410 }
13411 else if (MEM_P (dest)
13412 /* Ignore pushes, they clobber nothing. */
13413 && ! push_operand (dest, GET_MODE (dest)))
13414 mem_last_set = DF_INSN_LUID (record_dead_insn);
13415 }
13416
13417 /* Update the records of when each REG was most recently set or killed
13418 for the things done by INSN. This is the last thing done in processing
13419 INSN in the combiner loop.
13420
13421 We update reg_stat[], in particular fields last_set, last_set_value,
13422 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13423 last_death, and also the similar information mem_last_set (which insn
13424 most recently modified memory) and last_call_luid (which insn was the
13425 most recent subroutine call). */
13426
13427 static void
13428 record_dead_and_set_regs (rtx_insn *insn)
13429 {
13430 rtx link;
13431 unsigned int i;
13432
13433 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13434 {
13435 if (REG_NOTE_KIND (link) == REG_DEAD
13436 && REG_P (XEXP (link, 0)))
13437 {
13438 unsigned int regno = REGNO (XEXP (link, 0));
13439 unsigned int endregno = END_REGNO (XEXP (link, 0));
13440
13441 for (i = regno; i < endregno; i++)
13442 {
13443 reg_stat_type *rsp;
13444
13445 rsp = &reg_stat[i];
13446 rsp->last_death = insn;
13447 }
13448 }
13449 else if (REG_NOTE_KIND (link) == REG_INC)
13450 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13451 }
13452
13453 if (CALL_P (insn))
13454 {
13455 HARD_REG_SET callee_clobbers
13456 = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
13457 hard_reg_set_iterator hrsi;
13458 EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
13459 {
13460 reg_stat_type *rsp;
13461
13462 /* ??? We could try to preserve some information from the last
13463 set of register I if the call doesn't actually clobber
13464 (reg:last_set_mode I), which might be true for ABIs with
13465 partial clobbers. However, it would be difficult to
13466 	     update last_set_nonzero_bits and last_set_sign_bit_copies
13467 to account for the part of I that actually was clobbered.
13468 It wouldn't help much anyway, since we rarely see this
13469 situation before RA. */
13470 rsp = &reg_stat[i];
13471 rsp->last_set_invalid = 1;
13472 rsp->last_set = insn;
13473 rsp->last_set_value = 0;
13474 rsp->last_set_mode = VOIDmode;
13475 rsp->last_set_nonzero_bits = 0;
13476 rsp->last_set_sign_bit_copies = 0;
13477 rsp->last_death = 0;
13478 rsp->truncated_to_mode = VOIDmode;
13479 }
13480
13481 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13482
13483 /* We can't combine into a call pattern. Remember, though, that
13484 the return value register is set at this LUID. We could
13485 still replace a register with the return value from the
13486 wrong subroutine call! */
13487 note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
13488 }
13489 else
13490 note_stores (insn, record_dead_and_set_regs_1, insn);
13491 }
13492
13493 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13494 register present in the SUBREG, so for each such SUBREG go back and
13495 adjust nonzero and sign bit information of the registers that are
13496 known to have some zero/sign bits set.
13497
13498 This is needed because when combine blows the SUBREGs away, the
13499 information on zero/sign bits is lost and further combines can be
13500 missed because of that. */
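/* For instance, a promoted (subreg:HI (reg:SI P)) with
   SUBREG_PROMOTED_UNSIGNED_P set promises that the SImode value in P is
   already zero-extended from HImode, so the nonzero bits recorded for P
   can be narrowed to GET_MODE_MASK (HImode) (illustrative sketch).  */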
13501
13502 static void
13503 record_promoted_value (rtx_insn *insn, rtx subreg)
13504 {
13505 struct insn_link *links;
13506 rtx set;
13507 unsigned int regno = REGNO (SUBREG_REG (subreg));
13508 machine_mode mode = GET_MODE (subreg);
13509
13510 if (!HWI_COMPUTABLE_MODE_P (mode))
13511 return;
13512
13513 for (links = LOG_LINKS (insn); links;)
13514 {
13515 reg_stat_type *rsp;
13516
13517 insn = links->insn;
13518 set = single_set (insn);
13519
13520 if (! set || !REG_P (SET_DEST (set))
13521 || REGNO (SET_DEST (set)) != regno
13522 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13523 {
13524 links = links->next;
13525 continue;
13526 }
13527
13528 rsp = &reg_stat[regno];
13529 if (rsp->last_set == insn)
13530 {
13531 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13532 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13533 }
13534
13535 if (REG_P (SET_SRC (set)))
13536 {
13537 regno = REGNO (SET_SRC (set));
13538 links = LOG_LINKS (insn);
13539 }
13540 else
13541 break;
13542 }
13543 }
13544
13545 /* Check if X, a register, is known to contain a value already
13546 truncated to MODE. In this case we can use a subreg to refer to
13547 the truncated value even though in the generic case we would need
13548 an explicit truncation. */
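/* For example, if X is recorded as already truncated to QImode, a
   (truncate:HI (reg:SI X)) is redundant and the lowpart
   (subreg:HI (reg:SI X) 0) can be used instead (illustrative sketch;
   the subreg byte offset depends on endianness).  */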
13549
13550 static bool
13551 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13552 {
13553 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13554 machine_mode truncated = rsp->truncated_to_mode;
13555
13556 if (truncated == 0
13557 || rsp->truncation_label < label_tick_ebb_start)
13558 return false;
13559 if (!partial_subreg_p (mode, truncated))
13560 return true;
13561 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13562 return true;
13563 return false;
13564 }
13565
13566 /* If X is a hard reg or a subreg record the mode that the register is
13567 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13568 able to turn a truncate into a subreg using this information. Return true
13569 if traversing X is complete. */
13570
13571 static bool
13572 record_truncated_value (rtx x)
13573 {
13574 machine_mode truncated_mode;
13575 reg_stat_type *rsp;
13576
13577 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13578 {
13579 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13580 truncated_mode = GET_MODE (x);
13581
13582 if (!partial_subreg_p (truncated_mode, original_mode))
13583 return true;
13584
13586 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13587 return true;
13588
13589 x = SUBREG_REG (x);
13590 }
13591 /* ??? For hard-regs we now record everything. We might be able to
13592 optimize this using last_set_mode. */
13593 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13594 truncated_mode = GET_MODE (x);
13595 else
13596 return false;
13597
13598 rsp = &reg_stat[REGNO (x)];
13599 if (rsp->truncated_to_mode == 0
13600 || rsp->truncation_label < label_tick_ebb_start
13601 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13602 {
13603 rsp->truncated_to_mode = truncated_mode;
13604 rsp->truncation_label = label_tick;
13605 }
13606
13607 return true;
13608 }
13609
13610 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13611    the modes they are used in.  This can help turning TRUNCATEs into
13612 SUBREGs. */
13613
13614 static void
13615 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13616 {
13617 subrtx_var_iterator::array_type array;
13618 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13619 if (record_truncated_value (*iter))
13620 iter.skip_subrtxes ();
13621 }
13622
13623 /* Scan X for promoted SUBREGs. For each one found,
13624 note what it implies to the registers used in it. */
13625
13626 static void
13627 check_promoted_subreg (rtx_insn *insn, rtx x)
13628 {
13629 if (GET_CODE (x) == SUBREG
13630 && SUBREG_PROMOTED_VAR_P (x)
13631 && REG_P (SUBREG_REG (x)))
13632 record_promoted_value (insn, x);
13633 else
13634 {
13635 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13636 int i, j;
13637
13638 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13639 switch (format[i])
13640 {
13641 case 'e':
13642 check_promoted_subreg (insn, XEXP (x, i));
13643 break;
13644 case 'V':
13645 case 'E':
13646 if (XVEC (x, i) != 0)
13647 for (j = 0; j < XVECLEN (x, i); j++)
13648 check_promoted_subreg (insn, XVECEXP (x, i, j));
13649 break;
13650 }
13651 }
13652 }
13653 \f
13654 /* Verify that all the registers and memory references mentioned in *LOC are
13655 still valid. *LOC was part of a value set in INSN when label_tick was
13656 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13657 the invalid references with (clobber (const_int 0)) and return 1. This
13658 replacement is useful because we often can get useful information about
13659 the form of a value (e.g., if it was produced by a shift that always
13660 produces -1 or 0) even though we don't know exactly what registers it
13661 was produced from. */
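/* For example, if a register's last recorded value was
   (ashiftrt:SI (reg:SI A) (const_int 31)) and A has since been modified,
   replacing (reg:SI A) with (clobber (const_int 0)) still preserves the
   useful fact that the value is either 0 or -1 (illustrative sketch).  */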
13662
13663 static int
13664 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13665 {
13666 rtx x = *loc;
13667 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13668 int len = GET_RTX_LENGTH (GET_CODE (x));
13669 int i, j;
13670
13671 if (REG_P (x))
13672 {
13673 unsigned int regno = REGNO (x);
13674 unsigned int endregno = END_REGNO (x);
13675 unsigned int j;
13676
13677 for (j = regno; j < endregno; j++)
13678 {
13679 reg_stat_type *rsp = &reg_stat[j];
13680 if (rsp->last_set_invalid
13681 /* If this is a pseudo-register that was only set once and not
13682 live at the beginning of the function, it is always valid. */
13683 || (! (regno >= FIRST_PSEUDO_REGISTER
13684 && regno < reg_n_sets_max
13685 && REG_N_SETS (regno) == 1
13686 && (!REGNO_REG_SET_P
13687 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13688 regno)))
13689 && rsp->last_set_label > tick))
13690 {
13691 if (replace)
13692 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13693 return replace;
13694 }
13695 }
13696
13697 return 1;
13698 }
13699 /* If this is a memory reference, make sure that there were no stores after
13700 it that might have clobbered the value. We don't have alias info, so we
13701 assume any store invalidates it. Moreover, we only have local UIDs, so
13702 we also assume that there were stores in the intervening basic blocks. */
13703 else if (MEM_P (x) && !MEM_READONLY_P (x)
13704 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13705 {
13706 if (replace)
13707 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13708 return replace;
13709 }
13710
13711 for (i = 0; i < len; i++)
13712 {
13713 if (fmt[i] == 'e')
13714 {
13715 /* Check for identical subexpressions. If x contains
13716 	     identical subexpressions we only have to traverse one of
13717 them. */
13718 if (i == 1 && ARITHMETIC_P (x))
13719 {
13720 /* Note that at this point x0 has already been checked
13721 and found valid. */
13722 rtx x0 = XEXP (x, 0);
13723 rtx x1 = XEXP (x, 1);
13724
13725 /* If x0 and x1 are identical then x is also valid. */
13726 if (x0 == x1)
13727 return 1;
13728
13729 /* If x1 is identical to a subexpression of x0 then
13730 while checking x0, x1 has already been checked. Thus
13731 		 it is valid and so is x.  */
13732 if (ARITHMETIC_P (x0)
13733 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13734 return 1;
13735
13736 /* If x0 is identical to a subexpression of x1 then x is
13737 valid iff the rest of x1 is valid. */
13738 if (ARITHMETIC_P (x1)
13739 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13740 return
13741 get_last_value_validate (&XEXP (x1,
13742 x0 == XEXP (x1, 0) ? 1 : 0),
13743 insn, tick, replace);
13744 }
13745
13746 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13747 replace) == 0)
13748 return 0;
13749 }
13750 else if (fmt[i] == 'E')
13751 for (j = 0; j < XVECLEN (x, i); j++)
13752 if (get_last_value_validate (&XVECEXP (x, i, j),
13753 insn, tick, replace) == 0)
13754 return 0;
13755 }
13756
13757 /* If we haven't found a reason for it to be invalid, it is valid. */
13758 return 1;
13759 }
13760
13761 /* Get the last value assigned to X, if known. Some registers
13762 in the value may be replaced with (clobber (const_int 0)) if their value
13763    is no longer known reliably.  */
13764
13765 static rtx
13766 get_last_value (const_rtx x)
13767 {
13768 unsigned int regno;
13769 rtx value;
13770 reg_stat_type *rsp;
13771
13772 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13773 then convert it to the desired mode. If this is a paradoxical SUBREG,
13774 we cannot predict what values the "extra" bits might have. */
13775 if (GET_CODE (x) == SUBREG
13776 && subreg_lowpart_p (x)
13777 && !paradoxical_subreg_p (x)
13778 && (value = get_last_value (SUBREG_REG (x))) != 0)
13779 return gen_lowpart (GET_MODE (x), value);
13780
13781 if (!REG_P (x))
13782 return 0;
13783
13784 regno = REGNO (x);
13785 rsp = &reg_stat[regno];
13786 value = rsp->last_set_value;
13787
13788 /* If we don't have a value, or if it isn't for this basic block and
13789      it's either a hard register, set more than once, or it's live
13790 at the beginning of the function, return 0.
13791
13792 Because if it's not live at the beginning of the function then the reg
13793 is always set before being used (is never used without being set).
13794 And, if it's set only once, and it's always set before use, then all
13795 uses must have the same last value, even if it's not from this basic
13796 block. */
13797
13798 if (value == 0
13799 || (rsp->last_set_label < label_tick_ebb_start
13800 && (regno < FIRST_PSEUDO_REGISTER
13801 || regno >= reg_n_sets_max
13802 || REG_N_SETS (regno) != 1
13803 || REGNO_REG_SET_P
13804 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13805 return 0;
13806
13807 /* If the value was set in a later insn than the ones we are processing,
13808 we can't use it even if the register was only set once. */
13809 if (rsp->last_set_label == label_tick
13810 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13811 return 0;
13812
13813 /* If fewer bits were set than what we are asked for now, we cannot use
13814 the value. */
13815 if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13816 GET_MODE_PRECISION (GET_MODE (x))))
13817 return 0;
13818
13819 /* If the value has all its registers valid, return it. */
13820 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13821 return value;
13822
13823 /* Otherwise, make a copy and replace any invalid register with
13824 (clobber (const_int 0)). If that fails for some reason, return 0. */
13825
13826 value = copy_rtx (value);
13827 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13828 return value;
13829
13830 return 0;
13831 }
13832 \f
13833 /* Define three variables used for communication between the following
13834 routines. */
13835
13836 static unsigned int reg_dead_regno, reg_dead_endregno;
13837 static int reg_dead_flag;
13838 rtx reg_dead_reg;
13839
13840 /* Function called via note_stores from reg_dead_at_p.
13841
13842 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13843    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13844
13845 static void
13846 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13847 {
13848 unsigned int regno, endregno;
13849
13850 if (!REG_P (dest))
13851 return;
13852
13853 regno = REGNO (dest);
13854 endregno = END_REGNO (dest);
13855 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13856 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13857 }
13858
13859 /* Return nonzero if REG is known to be dead at INSN.
13860
13861 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13862 referencing REG, it is dead. If we hit a SET referencing REG, it is
13863 live. Otherwise, see if it is live or dead at the start of the basic
13864 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13865 must be assumed to be always live. */
13866
13867 static int
13868 reg_dead_at_p (rtx reg, rtx_insn *insn)
13869 {
13870 basic_block block;
13871 unsigned int i;
13872
13873 /* Set variables for reg_dead_at_p_1. */
13874 reg_dead_regno = REGNO (reg);
13875 reg_dead_endregno = END_REGNO (reg);
13876 reg_dead_reg = reg;
13877
13878 reg_dead_flag = 0;
13879
13880 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13881 we allow the machine description to decide whether use-and-clobber
13882 patterns are OK. */
13883 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13884 {
13885 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13886 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13887 return 0;
13888 }
13889
13890 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13891 beginning of basic block. */
13892 block = BLOCK_FOR_INSN (insn);
13893 for (;;)
13894 {
13895 if (INSN_P (insn))
13896 {
13897 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13898 return 1;
13899
13900 note_stores (insn, reg_dead_at_p_1, NULL);
13901 if (reg_dead_flag)
13902 return reg_dead_flag == 1 ? 1 : 0;
13903
13904 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13905 return 1;
13906 }
13907
13908 if (insn == BB_HEAD (block))
13909 break;
13910
13911 insn = PREV_INSN (insn);
13912 }
13913
13914 /* Look at live-in sets for the basic block that we were in. */
13915 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13916 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13917 return 0;
13918
13919 return 1;
13920 }
13921 \f
13922 /* Note hard registers in X that are used. */
13923
13924 static void
13925 mark_used_regs_combine (rtx x)
13926 {
13927 RTX_CODE code = GET_CODE (x);
13928 unsigned int regno;
13929 int i;
13930
13931 switch (code)
13932 {
13933 case LABEL_REF:
13934 case SYMBOL_REF:
13935 case CONST:
13936 CASE_CONST_ANY:
13937 case PC:
13938 case ADDR_VEC:
13939 case ADDR_DIFF_VEC:
13940 case ASM_INPUT:
13941 /* CC0 must die in the insn after it is set, so we don't need to take
13942 special note of it here. */
13943 case CC0:
13944 return;
13945
13946 case CLOBBER:
13947 /* If we are clobbering a MEM, mark any hard registers inside the
13948 address as used. */
13949 if (MEM_P (XEXP (x, 0)))
13950 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13951 return;
13952
13953 case REG:
13954 regno = REGNO (x);
13955 /* A hard reg in a wide mode may really be multiple registers.
13956 If so, mark all of them just like the first. */
13957 if (regno < FIRST_PSEUDO_REGISTER)
13958 {
13959 /* None of this applies to the stack, frame or arg pointers. */
13960 if (regno == STACK_POINTER_REGNUM
13961 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13962 && regno == HARD_FRAME_POINTER_REGNUM)
13963 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13964 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13965 || regno == FRAME_POINTER_REGNUM)
13966 return;
13967
13968 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13969 }
13970 return;
13971
13972 case SET:
13973 {
13974 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13975 the address. */
13976 rtx testreg = SET_DEST (x);
13977
13978 while (GET_CODE (testreg) == SUBREG
13979 || GET_CODE (testreg) == ZERO_EXTRACT
13980 || GET_CODE (testreg) == STRICT_LOW_PART)
13981 testreg = XEXP (testreg, 0);
13982
13983 if (MEM_P (testreg))
13984 mark_used_regs_combine (XEXP (testreg, 0));
13985
13986 mark_used_regs_combine (SET_SRC (x));
13987 }
13988 return;
13989
13990 default:
13991 break;
13992 }
13993
13994 /* Recursively scan the operands of this expression. */
13995
13996 {
13997 const char *fmt = GET_RTX_FORMAT (code);
13998
13999 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
14000 {
14001 if (fmt[i] == 'e')
14002 mark_used_regs_combine (XEXP (x, i));
14003 else if (fmt[i] == 'E')
14004 {
14005 int j;
14006
14007 for (j = 0; j < XVECLEN (x, i); j++)
14008 mark_used_regs_combine (XVECEXP (x, i, j));
14009 }
14010 }
14011 }
14012 }
14013 \f
14014 /* Remove register number REGNO from the dead registers list of INSN.
14015
14016 Return the note used to record the death, if there was one. */
14017
14018 rtx
14019 remove_death (unsigned int regno, rtx_insn *insn)
14020 {
14021 rtx note = find_regno_note (insn, REG_DEAD, regno);
14022
14023 if (note)
14024 remove_note (insn, note);
14025
14026 return note;
14027 }
14028
14029 /* For each register (hardware or pseudo) used within expression X, if its
14030 death is in an instruction with luid between FROM_LUID (inclusive) and
14031 TO_INSN (exclusive), put a REG_DEAD note for that register in the
14032 list headed by PNOTES.
14033
14034 That said, don't move registers killed by maybe_kill_insn.
14035
14036 This is done when X is being merged by combination into TO_INSN. These
14037 notes will then be distributed as needed. */
14038
14039 static void
14040 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
14041 rtx *pnotes)
14042 {
14043 const char *fmt;
14044 int len, i;
14045 enum rtx_code code = GET_CODE (x);
14046
14047 if (code == REG)
14048 {
14049 unsigned int regno = REGNO (x);
14050 rtx_insn *where_dead = reg_stat[regno].last_death;
14051
14052 /* If we do not know where the register died, it may still die between
14053 FROM_LUID and TO_INSN. If so, find it. This is PR83304. */
14054 if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
14055 {
14056 rtx_insn *insn = prev_real_nondebug_insn (to_insn);
14057 while (insn
14058 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
14059 && DF_INSN_LUID (insn) >= from_luid)
14060 {
14061 if (dead_or_set_regno_p (insn, regno))
14062 {
14063 if (find_regno_note (insn, REG_DEAD, regno))
14064 where_dead = insn;
14065 break;
14066 }
14067
14068 insn = prev_real_nondebug_insn (insn);
14069 }
14070 }
14071
14072 /* Don't move the register if it gets killed in between from and to. */
14073 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
14074 && ! reg_referenced_p (x, maybe_kill_insn))
14075 return;
14076
14077 if (where_dead
14078 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14079 && DF_INSN_LUID (where_dead) >= from_luid
14080 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
14081 {
14082 rtx note = remove_death (regno, where_dead);
14083
14084 /* It is possible for the call above to return 0. This can occur
14085 when last_death points to I2 or I1 that we combined with.
14086 In that case make a new note.
14087
14088 We must also check for the case where X is a hard register
14089 and NOTE is a death note for a range of hard registers
14090 including X. In that case, we must put REG_DEAD notes for
14091 the remaining registers in place of NOTE. */
14092
14093 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
14094 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
14095 {
14096 unsigned int deadregno = REGNO (XEXP (note, 0));
14097 unsigned int deadend = END_REGNO (XEXP (note, 0));
14098 unsigned int ourend = END_REGNO (x);
14099 unsigned int i;
14100
14101 for (i = deadregno; i < deadend; i++)
14102 if (i < regno || i >= ourend)
14103 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14104 }
14105
14106 /* If we didn't find any note, or if we found a REG_DEAD note that
14107 covers only part of the given reg, and we have a multi-reg hard
14108 register, then to be safe we must check for REG_DEAD notes
14109 for each register other than the first. They could have
14110 their own REG_DEAD notes lying around. */
14111 else if ((note == 0
14112 || (note != 0
14113 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
14114 GET_MODE (x))))
14115 && regno < FIRST_PSEUDO_REGISTER
14116 && REG_NREGS (x) > 1)
14117 {
14118 unsigned int ourend = END_REGNO (x);
14119 unsigned int i, offset;
14120 rtx oldnotes = 0;
14121
14122 if (note)
14123 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14124 else
14125 offset = 1;
14126
14127 for (i = regno + offset; i < ourend; i++)
14128 move_deaths (regno_reg_rtx[i],
14129 maybe_kill_insn, from_luid, to_insn, &oldnotes);
14130 }
14131
14132 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14133 {
14134 XEXP (note, 1) = *pnotes;
14135 *pnotes = note;
14136 }
14137 else
14138 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14139 }
14140
14141 return;
14142 }
14143
14144 else if (GET_CODE (x) == SET)
14145 {
14146 rtx dest = SET_DEST (x);
14147
14148 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14149
14150 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14151 that accesses one word of a multi-word item, some
14152 	 piece of every register in the expression is used by
14153 this insn, so remove any old death. */
14154 /* ??? So why do we test for equality of the sizes? */
14155
14156 if (GET_CODE (dest) == ZERO_EXTRACT
14157 || GET_CODE (dest) == STRICT_LOW_PART
14158 || (GET_CODE (dest) == SUBREG
14159 && !read_modify_subreg_p (dest)))
14160 {
14161 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14162 return;
14163 }
14164
14165 /* If this is some other SUBREG, we know it replaces the entire
14166 value, so use that as the destination. */
14167 if (GET_CODE (dest) == SUBREG)
14168 dest = SUBREG_REG (dest);
14169
14170 /* If this is a MEM, adjust deaths of anything used in the address.
14171 For a REG (the only other possibility), the entire value is
14172 being replaced so the old value is not used in this insn. */
14173
14174 if (MEM_P (dest))
14175 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14176 to_insn, pnotes);
14177 return;
14178 }
14179
14180 else if (GET_CODE (x) == CLOBBER)
14181 return;
14182
14183 len = GET_RTX_LENGTH (code);
14184 fmt = GET_RTX_FORMAT (code);
14185
14186 for (i = 0; i < len; i++)
14187 {
14188 if (fmt[i] == 'E')
14189 {
14190 int j;
14191 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14192 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14193 to_insn, pnotes);
14194 }
14195 else if (fmt[i] == 'e')
14196 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14197 }
14198 }
14199 \f
14200 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14201 pattern of an insn. X must be a REG. */
14202
14203 static int
14204 reg_bitfield_target_p (rtx x, rtx body)
14205 {
14206 int i;
14207
14208 if (GET_CODE (body) == SET)
14209 {
14210 rtx dest = SET_DEST (body);
14211 rtx target;
14212 unsigned int regno, tregno, endregno, endtregno;
14213
14214 if (GET_CODE (dest) == ZERO_EXTRACT)
14215 target = XEXP (dest, 0);
14216 else if (GET_CODE (dest) == STRICT_LOW_PART)
14217 target = SUBREG_REG (XEXP (dest, 0));
14218 else
14219 return 0;
14220
14221 if (GET_CODE (target) == SUBREG)
14222 target = SUBREG_REG (target);
14223
14224 if (!REG_P (target))
14225 return 0;
14226
14227 tregno = REGNO (target), regno = REGNO (x);
14228 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14229 return target == x;
14230
14231 endtregno = end_hard_regno (GET_MODE (target), tregno);
14232 endregno = end_hard_regno (GET_MODE (x), regno);
14233
14234 return endregno > tregno && regno < endtregno;
14235 }
14236
14237 else if (GET_CODE (body) == PARALLEL)
14238 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14239 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14240 return 1;
14241
14242 return 0;
14243 }
14244 \f
14245 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14246 as appropriate. I3 and I2 are the insns resulting from the combination
14247 insns including FROM (I2 may be zero).
14248
14249 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14250 not need REG_DEAD notes because they are being substituted for. This
14251 saves searching in the most common cases.
14252
14253 Each note in the list is either ignored or placed on some insns, depending
14254 on the type of note. */
14255
14256 static void
14257 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14258 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14259 {
14260 rtx note, next_note;
14261 rtx tem_note;
14262 rtx_insn *tem_insn;
14263
14264 for (note = notes; note; note = next_note)
14265 {
14266 rtx_insn *place = 0, *place2 = 0;
14267
14268 next_note = XEXP (note, 1);
14269 switch (REG_NOTE_KIND (note))
14270 {
14271 case REG_BR_PROB:
14272 case REG_BR_PRED:
14273 /* Doesn't matter much where we put this, as long as it's somewhere.
14274 It is preferable to keep these notes on branches, which is most
14275 likely to be i3. */
14276 place = i3;
14277 break;
14278
14279 case REG_NON_LOCAL_GOTO:
14280 if (JUMP_P (i3))
14281 place = i3;
14282 else
14283 {
14284 gcc_assert (i2 && JUMP_P (i2));
14285 place = i2;
14286 }
14287 break;
14288
14289 case REG_EH_REGION:
14290 /* These notes must remain with the call or trapping instruction. */
14291 if (CALL_P (i3))
14292 place = i3;
14293 else if (i2 && CALL_P (i2))
14294 place = i2;
14295 else
14296 {
14297 gcc_assert (cfun->can_throw_non_call_exceptions);
14298 if (may_trap_p (i3))
14299 place = i3;
14300 else if (i2 && may_trap_p (i2))
14301 place = i2;
14302 /* ??? Otherwise assume we've combined things such that we
14303 can now prove that the instructions can't trap. Drop the
14304 note in this case. */
14305 }
14306 break;
14307
14308 case REG_ARGS_SIZE:
14309 /* ??? How to distribute between i3-i1. Assume i3 contains the
14310 entire adjustment. Assert i3 contains at least some adjust. */
14311 if (!noop_move_p (i3))
14312 {
14313 poly_int64 old_size, args_size = get_args_size (note);
14314 /* fixup_args_size_notes looks at REG_NORETURN note,
14315 so ensure the note is placed there first. */
14316 if (CALL_P (i3))
14317 {
14318 rtx *np;
14319 for (np = &next_note; *np; np = &XEXP (*np, 1))
14320 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14321 {
14322 rtx n = *np;
14323 *np = XEXP (n, 1);
14324 XEXP (n, 1) = REG_NOTES (i3);
14325 REG_NOTES (i3) = n;
14326 break;
14327 }
14328 }
14329 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14330 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14331 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14332 gcc_assert (maybe_ne (old_size, args_size)
14333 || (CALL_P (i3)
14334 && !ACCUMULATE_OUTGOING_ARGS
14335 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14336 }
14337 break;
14338
14339 case REG_NORETURN:
14340 case REG_SETJMP:
14341 case REG_TM:
14342 case REG_CALL_DECL:
14343 case REG_CALL_NOCF_CHECK:
14344 /* These notes must remain with the call. It should not be
14345 possible for both I2 and I3 to be a call. */
14346 if (CALL_P (i3))
14347 place = i3;
14348 else
14349 {
14350 gcc_assert (i2 && CALL_P (i2));
14351 place = i2;
14352 }
14353 break;
14354
14355 case REG_UNUSED:
14356 /* Any clobbers for i3 may still exist, and so we must process
14357 REG_UNUSED notes from that insn.
14358
14359 Any clobbers from i2 or i1 can only exist if they were added by
14360 recog_for_combine. In that case, recog_for_combine created the
14361 necessary REG_UNUSED notes. Trying to keep any original
14362 REG_UNUSED notes from these insns can cause incorrect output
14363 if it is for the same register as the original i3 dest.
14364 In that case, we will notice that the register is set in i3,
14365 and then add a REG_UNUSED note for the destination of i3, which
14366 is wrong. However, it is possible to have REG_UNUSED notes from
14367 	     i2 or i1 for registers which were both used and clobbered, so
14368 we keep notes from i2 or i1 if they will turn into REG_DEAD
14369 notes. */
14370
14371 /* If this register is set or clobbered in I3, put the note there
14372 unless there is one already. */
14373 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14374 {
14375 if (from_insn != i3)
14376 break;
14377
14378 if (! (REG_P (XEXP (note, 0))
14379 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14380 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14381 place = i3;
14382 }
14383 /* Otherwise, if this register is used by I3, then this register
14384 now dies here, so we must put a REG_DEAD note here unless there
14385 is one already. */
14386 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14387 && ! (REG_P (XEXP (note, 0))
14388 ? find_regno_note (i3, REG_DEAD,
14389 REGNO (XEXP (note, 0)))
14390 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14391 {
14392 PUT_REG_NOTE_KIND (note, REG_DEAD);
14393 place = i3;
14394 }
14395
14396 /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14397 but we can't tell which at this point. We must reset any
14398 expectations we had about the value that was previously
14399 stored in the reg. ??? Ideally, we'd adjust REG_N_SETS
14400 and, if appropriate, restore its previous value, but we
14401 don't have enough information for that at this point. */
14402 else
14403 {
14404 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14405
14406 /* Otherwise, if this register is now referenced in i2
14407 then the register used to be modified in one of the
14408 original insns. If it was i3 (say, in an unused
14409 parallel), it's now completely gone, so the note can
14410 be discarded. But if it was modified in i2, i1 or i0
14411 and we still reference it in i2, then we're
14412 referencing the previous value, and since the
14413 register was modified and REG_UNUSED, we know that
14414 the previous value is now dead. So, if we only
14415 reference the register in i2, we change the note to
14416 REG_DEAD, to reflect the previous value. However, if
14417 we're also setting or clobbering the register as
14418 scratch, we know (because the register was not
14419 referenced in i3) that it's unused, just as it was
14420 unused before, and we place the note in i2. */
14421 if (from_insn != i3 && i2 && INSN_P (i2)
14422 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14423 {
14424 if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14425 PUT_REG_NOTE_KIND (note, REG_DEAD);
14426 if (! (REG_P (XEXP (note, 0))
14427 ? find_regno_note (i2, REG_NOTE_KIND (note),
14428 REGNO (XEXP (note, 0)))
14429 : find_reg_note (i2, REG_NOTE_KIND (note),
14430 XEXP (note, 0))))
14431 place = i2;
14432 }
14433 }
14434
14435 break;
14436
14437 case REG_EQUAL:
14438 case REG_EQUIV:
14439 case REG_NOALIAS:
14440 /* These notes say something about results of an insn. We can
14441 only support them if they used to be on I3 in which case they
14442 remain on I3. Otherwise they are ignored.
14443
14444 If the note refers to an expression that is not a constant, we
14445 must also ignore the note since we cannot tell whether the
14446 equivalence is still true. It might be possible to do
14447 slightly better than this (we only have a problem if I2DEST
14448 or I1DEST is present in the expression), but it doesn't
14449 seem worth the trouble. */
14450
14451 if (from_insn == i3
14452 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14453 place = i3;
14454 break;
14455
14456 case REG_INC:
14457 /* These notes say something about how a register is used. They must
14458 be present on any use of the register in I2 or I3. */
14459 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14460 place = i3;
14461
14462 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14463 {
14464 if (place)
14465 place2 = i2;
14466 else
14467 place = i2;
14468 }
14469 break;
14470
14471 case REG_LABEL_TARGET:
14472 case REG_LABEL_OPERAND:
14473 /* This can show up in several ways -- either directly in the
14474 pattern, or hidden off in the constant pool with (or without?)
14475 a REG_EQUAL note. */
14476 /* ??? Ignore the without-reg_equal-note problem for now. */
14477 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14478 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14479 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14480 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14481 place = i3;
14482
14483 if (i2
14484 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14485 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14486 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14487 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14488 {
14489 if (place)
14490 place2 = i2;
14491 else
14492 place = i2;
14493 }
14494
14495 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14496 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14497 there. */
14498 if (place && JUMP_P (place)
14499 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14500 && (JUMP_LABEL (place) == NULL
14501 || JUMP_LABEL (place) == XEXP (note, 0)))
14502 {
14503 rtx label = JUMP_LABEL (place);
14504
14505 if (!label)
14506 JUMP_LABEL (place) = XEXP (note, 0);
14507 else if (LABEL_P (label))
14508 LABEL_NUSES (label)--;
14509 }
14510
14511 if (place2 && JUMP_P (place2)
14512 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14513 && (JUMP_LABEL (place2) == NULL
14514 || JUMP_LABEL (place2) == XEXP (note, 0)))
14515 {
14516 rtx label = JUMP_LABEL (place2);
14517
14518 if (!label)
14519 JUMP_LABEL (place2) = XEXP (note, 0);
14520 else if (LABEL_P (label))
14521 LABEL_NUSES (label)--;
14522 place2 = 0;
14523 }
14524 break;
14525
14526 case REG_NONNEG:
14527 /* This note says something about the value of a register prior
14528 to the execution of an insn. It is too much trouble to see
14529 if the note is still correct in all situations. It is better
14530 to simply delete it. */
14531 break;
14532
14533 case REG_DEAD:
14534 /* If we replaced the right hand side of FROM_INSN with a
14535 REG_EQUAL note, the original use of the dying register
14536 will not have been combined into I3 and I2. In such cases,
14537 FROM_INSN is guaranteed to be the first of the combined
14538 instructions, so we simply need to search back before
14539 FROM_INSN for the previous use or set of this register,
14540 then alter the notes there appropriately.
14541
14542 If the register is used as an input in I3, it dies there.
14543 Similarly for I2, if it is nonzero and adjacent to I3.
14544
14545 If the register is not used as an input in either I3 or I2
14546 and it is not one of the registers we were supposed to eliminate,
14547 there are two possibilities. We might have a non-adjacent I2
14548 or we might have somehow eliminated an additional register
14549 from a computation. For example, we might have had A & B where
14550 we discover that B will always be zero. In this case we will
14551 eliminate the reference to A.
14552
14553 In both cases, we must search to see if we can find a previous
14554 use of A and put the death note there. */
14555
14556 if (from_insn
14557 && from_insn == i2mod
14558 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14559 tem_insn = from_insn;
14560 else
14561 {
14562 if (from_insn
14563 && CALL_P (from_insn)
14564 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14565 place = from_insn;
14566 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14567 {
14568 /* If the new I2 sets the same register that is marked
14569 dead in the note, we do not in general know where to
14570 put the note. One important case we _can_ handle is
14571 when the note comes from I3. */
14572 if (from_insn == i3)
14573 place = i3;
14574 else
14575 break;
14576 }
14577 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14578 place = i3;
14579 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14580 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14581 place = i2;
14582 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14583 && !(i2mod
14584 && reg_overlap_mentioned_p (XEXP (note, 0),
14585 i2mod_old_rhs)))
14586 || rtx_equal_p (XEXP (note, 0), elim_i1)
14587 || rtx_equal_p (XEXP (note, 0), elim_i0))
14588 break;
14589 tem_insn = i3;
14590 }
14591
14592 if (place == 0)
14593 {
14594 basic_block bb = this_basic_block;
14595
14596 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14597 {
14598 if (!NONDEBUG_INSN_P (tem_insn))
14599 {
14600 if (tem_insn == BB_HEAD (bb))
14601 break;
14602 continue;
14603 }
14604
14605 /* If the register is being set at TEM_INSN, see if that is all
14606 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14607 into a REG_UNUSED note instead. Don't delete sets to
14608 global register vars. */
14609 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14610 || !global_regs[REGNO (XEXP (note, 0))])
14611 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14612 {
14613 rtx set = single_set (tem_insn);
14614 rtx inner_dest = 0;
14615 rtx_insn *cc0_setter = NULL;
14616
14617 if (set != 0)
14618 for (inner_dest = SET_DEST (set);
14619 (GET_CODE (inner_dest) == STRICT_LOW_PART
14620 || GET_CODE (inner_dest) == SUBREG
14621 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14622 inner_dest = XEXP (inner_dest, 0))
14623 ;
14624
14625 /* Verify that it was the set, and not a clobber that
14626 modified the register.
14627
14628 CC0 targets must be careful to maintain setter/user
14629 pairs. If we cannot delete the setter due to side
14630 effects, mark the user with an UNUSED note instead
14631 of deleting it. */
14632
14633 if (set != 0 && ! side_effects_p (SET_SRC (set))
14634 && rtx_equal_p (XEXP (note, 0), inner_dest)
14635 && (!HAVE_cc0
14636 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14637 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14638 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14639 {
14640 /* Move the notes and links of TEM_INSN elsewhere.
14641 This might delete other dead insns recursively.
14642 First set the pattern to something that won't use
14643 any register. */
14644 rtx old_notes = REG_NOTES (tem_insn);
14645
14646 PATTERN (tem_insn) = pc_rtx;
14647 REG_NOTES (tem_insn) = NULL;
14648
14649 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14650 NULL_RTX, NULL_RTX, NULL_RTX);
14651 distribute_links (LOG_LINKS (tem_insn));
14652
14653 unsigned int regno = REGNO (XEXP (note, 0));
14654 reg_stat_type *rsp = &reg_stat[regno];
14655 if (rsp->last_set == tem_insn)
14656 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14657
14658 SET_INSN_DELETED (tem_insn);
14659 if (tem_insn == i2)
14660 i2 = NULL;
14661
14662 /* Delete the setter too. */
14663 if (cc0_setter)
14664 {
14665 PATTERN (cc0_setter) = pc_rtx;
14666 old_notes = REG_NOTES (cc0_setter);
14667 REG_NOTES (cc0_setter) = NULL;
14668
14669 distribute_notes (old_notes, cc0_setter,
14670 cc0_setter, NULL,
14671 NULL_RTX, NULL_RTX, NULL_RTX);
14672 distribute_links (LOG_LINKS (cc0_setter));
14673
14674 SET_INSN_DELETED (cc0_setter);
14675 if (cc0_setter == i2)
14676 i2 = NULL;
14677 }
14678 }
14679 else
14680 {
14681 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14682
14683 /* If there isn't already a REG_UNUSED note, put one
14684 here. Do not place a REG_DEAD note, even if
14685 the register is also used here; that would not
14686 match the algorithm used in lifetime analysis
14687 and can cause the consistency check in the
14688 scheduler to fail. */
14689 if (! find_regno_note (tem_insn, REG_UNUSED,
14690 REGNO (XEXP (note, 0))))
14691 place = tem_insn;
14692 break;
14693 }
14694 }
14695 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14696 || (CALL_P (tem_insn)
14697 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14698 {
14699 place = tem_insn;
14700
14701 /* If we are doing a 3->2 combination, and we have a
14702 register which formerly died in i3 and was not used
14703 by i2, which now no longer dies in i3 and is used in
14704 i2 but does not die in i2, and place is between i2
14705 and i3, then we may need to move a link from place to
14706 i2. */
14707 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14708 && from_insn
14709 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14710 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14711 {
14712 struct insn_link *links = LOG_LINKS (place);
14713 LOG_LINKS (place) = NULL;
14714 distribute_links (links);
14715 }
14716 break;
14717 }
14718
14719 if (tem_insn == BB_HEAD (bb))
14720 break;
14721 }
14722
14723 }
14724
14725 /* If the register is set or already dead at PLACE, we needn't do
14726 anything with this note if it is still a REG_DEAD note.
14727 	 We check here if it is set at all, not if it is totally replaced,
14728 which is what `dead_or_set_p' checks, so also check for it being
14729 set partially. */
14730
14731 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14732 {
14733 unsigned int regno = REGNO (XEXP (note, 0));
14734 reg_stat_type *rsp = &reg_stat[regno];
14735
14736 if (dead_or_set_p (place, XEXP (note, 0))
14737 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14738 {
14739 /* Unless the register previously died in PLACE, clear
14740 last_death. [I no longer understand why this is
14741 being done.] */
14742 if (rsp->last_death != place)
14743 rsp->last_death = 0;
14744 place = 0;
14745 }
14746 else
14747 rsp->last_death = place;
14748
14749 /* If this is a death note for a hard reg that is occupying
14750 multiple registers, ensure that we are still using all
14751 parts of the object. If we find a piece of the object
14752 that is unused, we must arrange for an appropriate REG_DEAD
14753 note to be added for it. However, we can't just emit a USE
14754 and tag the note to it, since the register might actually
14755 	     be dead; so we recurse, and the recursive call then finds
14756 the previous insn that used this register. */
14757
14758 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14759 {
14760 unsigned int endregno = END_REGNO (XEXP (note, 0));
14761 bool all_used = true;
14762 unsigned int i;
14763
14764 for (i = regno; i < endregno; i++)
14765 if ((! refers_to_regno_p (i, PATTERN (place))
14766 && ! find_regno_fusage (place, USE, i))
14767 || dead_or_set_regno_p (place, i))
14768 {
14769 all_used = false;
14770 break;
14771 }
14772
14773 if (! all_used)
14774 {
14775 /* Put only REG_DEAD notes for pieces that are
14776 not already dead or set. */
14777
14778 for (i = regno; i < endregno;
14779 i += hard_regno_nregs (i, reg_raw_mode[i]))
14780 {
14781 rtx piece = regno_reg_rtx[i];
14782 basic_block bb = this_basic_block;
14783
14784 if (! dead_or_set_p (place, piece)
14785 && ! reg_bitfield_target_p (piece,
14786 PATTERN (place)))
14787 {
14788 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14789 NULL_RTX);
14790
14791 distribute_notes (new_note, place, place,
14792 NULL, NULL_RTX, NULL_RTX,
14793 NULL_RTX);
14794 }
14795 else if (! refers_to_regno_p (i, PATTERN (place))
14796 && ! find_regno_fusage (place, USE, i))
14797 for (tem_insn = PREV_INSN (place); ;
14798 tem_insn = PREV_INSN (tem_insn))
14799 {
14800 if (!NONDEBUG_INSN_P (tem_insn))
14801 {
14802 if (tem_insn == BB_HEAD (bb))
14803 break;
14804 continue;
14805 }
14806 if (dead_or_set_p (tem_insn, piece)
14807 || reg_bitfield_target_p (piece,
14808 PATTERN (tem_insn)))
14809 {
14810 add_reg_note (tem_insn, REG_UNUSED, piece);
14811 break;
14812 }
14813 }
14814 }
14815
14816 place = 0;
14817 }
14818 }
14819 }
14820 break;
14821
14822 default:
14823 /* Any other notes should not be present at this point in the
14824 compilation. */
14825 gcc_unreachable ();
14826 }
14827
14828 if (place)
14829 {
14830 XEXP (note, 1) = REG_NOTES (place);
14831 REG_NOTES (place) = note;
14832
14833 /* Set added_notes_insn to the earliest insn we added a note to. */
14834 if (added_notes_insn == 0
14835 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14836 added_notes_insn = place;
14837 }
14838
14839 if (place2)
14840 {
14841 add_shallow_copy_of_reg_note (place2, note);
14842
14843 /* Set added_notes_insn to the earliest insn we added a note to. */
14844 if (added_notes_insn == 0
14845 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14846 added_notes_insn = place2;
14847 }
14848 }
14849 }
14850 \f
14851 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14852 I3, I2, and I1 to new locations. This is also called to add a link
14853 pointing at I3 when I3's destination is changed. */
14854
14855 static void
14856 distribute_links (struct insn_link *links)
14857 {
14858 struct insn_link *link, *next_link;
14859
14860 for (link = links; link; link = next_link)
14861 {
14862 rtx_insn *place = 0;
14863 rtx_insn *insn;
14864 rtx set, reg;
14865
14866 next_link = link->next;
14867
14868 /* If the insn that this link points to is a NOTE, ignore it. */
14869 if (NOTE_P (link->insn))
14870 continue;
14871
14872 set = 0;
14873 rtx pat = PATTERN (link->insn);
14874 if (GET_CODE (pat) == SET)
14875 set = pat;
14876 else if (GET_CODE (pat) == PARALLEL)
14877 {
14878 int i;
14879 for (i = 0; i < XVECLEN (pat, 0); i++)
14880 {
14881 set = XVECEXP (pat, 0, i);
14882 if (GET_CODE (set) != SET)
14883 continue;
14884
14885 reg = SET_DEST (set);
14886 while (GET_CODE (reg) == ZERO_EXTRACT
14887 || GET_CODE (reg) == STRICT_LOW_PART
14888 || GET_CODE (reg) == SUBREG)
14889 reg = XEXP (reg, 0);
14890
14891 if (!REG_P (reg))
14892 continue;
14893
14894 if (REGNO (reg) == link->regno)
14895 break;
14896 }
14897 if (i == XVECLEN (pat, 0))
14898 continue;
14899 }
14900 else
14901 continue;
14902
14903 reg = SET_DEST (set);
14904
14905 while (GET_CODE (reg) == ZERO_EXTRACT
14906 || GET_CODE (reg) == STRICT_LOW_PART
14907 || GET_CODE (reg) == SUBREG)
14908 reg = XEXP (reg, 0);
14909
14910 if (reg == pc_rtx)
14911 continue;
14912
14913 /* A LOG_LINK is defined as being placed on the first insn that uses
14914 a register and points to the insn that sets the register. Start
14915 searching at the next insn after the target of the link and stop
14916 when we reach a set of the register or the end of the basic block.
14917
14918 Note that this correctly handles the link that used to point from
14919 I3 to I2. Also note that not much searching is typically done here
14920 since most links don't point very far away. */
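      /* An illustrative sketch with made-up register numbers: if LINK points
	 at an insn setting (reg 99), we scan forward from the insn after that
	 setter; the first later insn in this block that actually uses
	 (reg 99) becomes PLACE, while reaching an insn that merely overwrites
	 (reg 99), or the end of the block, leaves the link unplaced.  */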
14921
14922 for (insn = NEXT_INSN (link->insn);
14923 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14924 || BB_HEAD (this_basic_block->next_bb) != insn));
14925 insn = NEXT_INSN (insn))
14926 if (DEBUG_INSN_P (insn))
14927 continue;
14928 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14929 {
14930 if (reg_referenced_p (reg, PATTERN (insn)))
14931 place = insn;
14932 break;
14933 }
14934 else if (CALL_P (insn)
14935 && find_reg_fusage (insn, USE, reg))
14936 {
14937 place = insn;
14938 break;
14939 }
14940 else if (INSN_P (insn) && reg_set_p (reg, insn))
14941 break;
14942
14943 /* If we found a place to put the link, place it there unless there
14944 is already a link to the same insn as LINK at that point. */
14945
14946 if (place)
14947 {
14948 struct insn_link *link2;
14949
14950 FOR_EACH_LOG_LINK (link2, place)
14951 if (link2->insn == link->insn && link2->regno == link->regno)
14952 break;
14953
14954 if (link2 == NULL)
14955 {
14956 link->next = LOG_LINKS (place);
14957 LOG_LINKS (place) = link;
14958
14959 /* Set added_links_insn to the earliest insn we added a
14960 link to. */
14961 if (added_links_insn == 0
14962 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14963 added_links_insn = place;
14964 }
14965 }
14966 }
14967 }
14968 \f
14969 /* Check for any register or memory mentioned in EQUIV that is not
14970 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14971 of EXPR where some registers may have been replaced by constants. */
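/* A purely illustrative example with invented pseudo register numbers:
   with EXPR == (plus (reg 100) (reg 101)), an EQUIV of
   (plus (reg 100) (const_int 4)) mentions no register or memory absent
   from EXPR, so the function returns false, whereas an EQUIV of
   (plus (reg 102) (const_int 4)) makes it return true.  */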
14972
14973 static bool
14974 unmentioned_reg_p (rtx equiv, rtx expr)
14975 {
14976 subrtx_iterator::array_type array;
14977 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14978 {
14979 const_rtx x = *iter;
14980 if ((REG_P (x) || MEM_P (x))
14981 && !reg_mentioned_p (x, expr))
14982 return true;
14983 }
14984 return false;
14985 }
14986 \f
14987 DEBUG_FUNCTION void
14988 dump_combine_stats (FILE *file)
14989 {
14990 fprintf
14991 (file,
14992 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14993 combine_attempts, combine_merges, combine_extras, combine_successes);
14994 }
14995
14996 void
14997 dump_combine_total_stats (FILE *file)
14998 {
14999 fprintf
15000 (file,
15001 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
15002 total_attempts, total_merges, total_extras, total_successes);
15003 }
15004 \f
15005 /* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
15006 the reg-to-reg copy can usefully combine with later instructions, but we
15007 do not want to combine the hard reg into later instructions, for that
15008 restricts register allocation. */
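/* An illustrative sketch (the pseudo register numbers are invented): a copy
   such as

       (set (reg:SI 100) (reg:SI 0 ax))

   is rewritten below into

       (set (reg:SI 200) (reg:SI 0 ax))
       (set (reg:SI 100) (reg:SI 200))

   so that later combinations see pseudo 200 instead of the hard register.  */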
15009 static void
15010 make_more_copies (void)
15011 {
15012 basic_block bb;
15013
15014 FOR_EACH_BB_FN (bb, cfun)
15015 {
15016 rtx_insn *insn;
15017
15018 FOR_BB_INSNS (bb, insn)
15019 {
15020 if (!NONDEBUG_INSN_P (insn))
15021 continue;
15022
15023 rtx set = single_set (insn);
15024 if (!set)
15025 continue;
15026
15027 rtx dest = SET_DEST (set);
15028 if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
15029 continue;
15030
15031 rtx src = SET_SRC (set);
15032 if (!(REG_P (src) && HARD_REGISTER_P (src)))
15033 continue;
15034 if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
15035 continue;
15036
15037 rtx new_reg = gen_reg_rtx (GET_MODE (dest));
15038 rtx_insn *new_insn = gen_move_insn (new_reg, src);
15039 SET_SRC (set) = new_reg;
15040 emit_insn_before (new_insn, insn);
15041 df_insn_rescan (insn);
15042 }
15043 }
15044 }
15045
15046 /* Try combining insns through substitution. */
15047 static unsigned int
15048 rest_of_handle_combine (void)
15049 {
15050 make_more_copies ();
15051
15052 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
15053 df_note_add_problem ();
15054 df_analyze ();
15055
15056 regstat_init_n_sets_and_refs ();
15057 reg_n_sets_max = max_reg_num ();
15058
15059 int rebuild_jump_labels_after_combine
15060 = combine_instructions (get_insns (), max_reg_num ());
15061
15062 /* Combining insns may have turned an indirect jump into a
15063 direct jump. Rebuild the JUMP_LABEL fields of jumping
15064 instructions. */
15065 if (rebuild_jump_labels_after_combine)
15066 {
15067 if (dom_info_available_p (CDI_DOMINATORS))
15068 free_dominance_info (CDI_DOMINATORS);
15069 timevar_push (TV_JUMP);
15070 rebuild_jump_labels (get_insns ());
15071 cleanup_cfg (0);
15072 timevar_pop (TV_JUMP);
15073 }
15074
15075 regstat_free_n_sets_and_refs ();
15076 return 0;
15077 }
15078
15079 namespace {
15080
15081 const pass_data pass_data_combine =
15082 {
15083 RTL_PASS, /* type */
15084 "combine", /* name */
15085 OPTGROUP_NONE, /* optinfo_flags */
15086 TV_COMBINE, /* tv_id */
15087 PROP_cfglayout, /* properties_required */
15088 0, /* properties_provided */
15089 0, /* properties_destroyed */
15090 0, /* todo_flags_start */
15091 TODO_df_finish, /* todo_flags_finish */
15092 };
15093
15094 class pass_combine : public rtl_opt_pass
15095 {
15096 public:
15097 pass_combine (gcc::context *ctxt)
15098 : rtl_opt_pass (pass_data_combine, ctxt)
15099 {}
15100
15101 /* opt_pass methods: */
15102 virtual bool gate (function *) { return (optimize > 0); }
15103 virtual unsigned int execute (function *)
15104 {
15105 return rest_of_handle_combine ();
15106 }
15107
15108 }; // class pass_combine
15109
15110 } // anon namespace
15111
15112 rtl_opt_pass *
15113 make_pass_combine (gcc::context *ctxt)
15114 {
15115 return new pass_combine (ctxt);
15116 }