1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
23
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They are set up by create_log_links at the start of this pass.
29
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
35
36 LOG_LINKS do not record uses of CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
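
/* As a purely illustrative example (not a pattern taken from any particular
   target's machine description), suppose I2 and I3 are

	(set (reg 100) (plus (reg 101) (const_int 4)))
	(set (mem (reg 100)) (reg 102))

   and (reg 100) dies in I3. Substituting I2's SET_SRC into I3 yields

	(set (mem (plus (reg 101) (const_int 4))) (reg 102))

   and, if the machine description recognizes that pattern, I3 is replaced
   with it and I2 is deleted. */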
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
107
108 /* Number of attempts to combine instructions in this function. */
109
110 static int combine_attempts;
111
112 /* Number of attempts that got as far as substitution in this function. */
113
114 static int combine_merges;
115
116 /* Number of instructions combined with added SETs in this function. */
117
118 static int combine_extras;
119
120 /* Number of instructions combined in this function. */
121
122 static int combine_successes;
123
124 /* Totals over entire compilation. */
125
126 static int total_attempts, total_merges, total_extras, total_successes;
127
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
134
135 static rtx_insn *i2mod;
136
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
138
139 static rtx i2mod_old_rhs;
140
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
142
143 static rtx i2mod_new_rhs;
144 \f
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
148
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
151
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
157
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
160
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
164
165 Therefore, we maintain the following fields:
166
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
175
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
180
181 (The next two parameters are out of date).
182
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
185
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
190
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
194
195 /* Record last value assigned to (hard or pseudo) register n. */
196
197 rtx last_set_value;
198
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
201
202 int last_set_table_tick;
203
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
206
207 int last_set_label;
208
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
212 sign bit copies it was known to have when it was last set. */
213
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
217
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
221
222 char last_set_invalid;
223
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
228
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
233
234 If an entry is zero, it means that we don't know anything special. */
235
236 unsigned char sign_bit_copies;
237
238 unsigned HOST_WIDE_INT nonzero_bits;
239
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
243
244 int truncation_label;
245
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that value already contains a truncated
249 value. */
250
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
252 };
253
254
255 static vec<reg_stat_type> reg_stat;
256
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
264
265 static unsigned int reg_n_sets_max;
266
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
269
270 static int mem_last_set;
271
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
274
275 static int last_call_luid;
276
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
282
283 static rtx_insn *subst_insn;
284
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
291
292 static int subst_low_luid;
293
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
296
297 static HARD_REG_SET newpat_used_regs;
298
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
300 insn is earlier than I2 or I3, combine should rescan starting at
301 that location. */
302
303 static rtx_insn *added_links_insn;
304
305 /* Basic block in which we are performing combines. */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
308
309 \f
310 /* Length of the currently allocated uid_insn_cost array. */
311
312 static int max_uid_known;
313
314 /* The following array records the insn_rtx_cost for every insn
315 in the instruction stream. */
316
317 static int *uid_insn_cost;
318
319 /* The following array records the LOG_LINKS for every insn in the
320 instruction stream as struct insn_link pointers. */
321
322 struct insn_link {
323 rtx_insn *insn;
324 unsigned int regno;
325 struct insn_link *next;
326 };
327
328 static struct insn_link **uid_log_links;
329
330 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
331 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
332
333 #define FOR_EACH_LOG_LINK(L, INSN) \
334 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
335
336 /* Links for LOG_LINKS are allocated from this obstack. */
337
338 static struct obstack insn_link_obstack;
339
340 /* Allocate a link. */
341
342 static inline struct insn_link *
343 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
344 {
345 struct insn_link *l
346 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
347 sizeof (struct insn_link));
348 l->insn = insn;
349 l->regno = regno;
350 l->next = next;
351 return l;
352 }
353
354 /* Incremented for each basic block. */
355
356 static int label_tick;
357
358 /* Reset to label_tick for each extended basic block in scanning order. */
359
360 static int label_tick_ebb_start;
361
362 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
363 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
364
365 static machine_mode nonzero_bits_mode;
366
367 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
368 be safely used. It is zero while computing them and after combine has
369 completed. Keeping it zero while the values are being computed prevents
370 propagating values based on previously set values, which can be incorrect
371 if a variable is modified in a loop.
372
373 static int nonzero_sign_valid;
374
375 \f
376 /* Record one modification to rtl structure
377 to be undone by storing old_contents into *where. */
378
379 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
380
381 struct undo
382 {
383 struct undo *next;
384 enum undo_kind kind;
385 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
386 union { rtx *r; int *i; struct insn_link **l; } where;
387 };
388
389 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
390 num_undo says how many are currently recorded.
391
392 other_insn is nonzero if we have modified some other insn in the process
393 of working on subst_insn. It must be verified too. */
394
395 struct undobuf
396 {
397 struct undo *undos;
398 struct undo *frees;
399 rtx_insn *other_insn;
400 };
401
402 static struct undobuf undobuf;
403
404 /* Number of times the pseudo being substituted for
405 was found and replaced. */
406
407 static int n_occurrences;
408
409 static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
410 machine_mode,
411 unsigned HOST_WIDE_INT,
412 unsigned HOST_WIDE_INT *);
413 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
414 machine_mode,
415 unsigned int, unsigned int *);
416 static void do_SUBST (rtx *, rtx);
417 static void do_SUBST_INT (int *, int);
418 static void init_reg_last (void);
419 static void setup_incoming_promotions (rtx_insn *);
420 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
421 static int cant_combine_insn_p (rtx_insn *);
422 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
423 rtx_insn *, rtx_insn *, rtx *, rtx *);
424 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
425 static int contains_muldiv (rtx);
426 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
427 int *, rtx_insn *);
428 static void undo_all (void);
429 static void undo_commit (void);
430 static rtx *find_split_point (rtx *, rtx_insn *, bool);
431 static rtx subst (rtx, rtx, rtx, int, int, int);
432 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
433 static rtx simplify_if_then_else (rtx);
434 static rtx simplify_set (rtx);
435 static rtx simplify_logical (rtx);
436 static rtx expand_compound_operation (rtx);
437 static const_rtx expand_field_assignment (const_rtx);
438 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
439 rtx, unsigned HOST_WIDE_INT, int, int, int);
440 static rtx extract_left_shift (rtx, int);
441 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
442 unsigned HOST_WIDE_INT *);
443 static rtx canon_reg_for_combine (rtx, rtx);
444 static rtx force_to_mode (rtx, machine_mode,
445 unsigned HOST_WIDE_INT, int);
446 static rtx if_then_else_cond (rtx, rtx *, rtx *);
447 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
448 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
449 static rtx make_field_assignment (rtx);
450 static rtx apply_distributive_law (rtx);
451 static rtx distribute_and_simplify_rtx (rtx, int);
452 static rtx simplify_and_const_int_1 (machine_mode, rtx,
453 unsigned HOST_WIDE_INT);
454 static rtx simplify_and_const_int (rtx, machine_mode, rtx,
455 unsigned HOST_WIDE_INT);
456 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
457 HOST_WIDE_INT, machine_mode, int *);
458 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
459 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
460 int);
461 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
462 static rtx gen_lowpart_for_combine (machine_mode, rtx);
463 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
464 rtx, rtx *);
465 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
466 static void update_table_tick (rtx);
467 static void record_value_for_reg (rtx, rtx_insn *, rtx);
468 static void check_promoted_subreg (rtx_insn *, rtx);
469 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
470 static void record_dead_and_set_regs (rtx_insn *);
471 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
472 static rtx get_last_value (const_rtx);
473 static int use_crosses_set_p (const_rtx, int);
474 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
475 static int reg_dead_at_p (rtx, rtx_insn *);
476 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
477 static int reg_bitfield_target_p (rtx, rtx);
478 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
479 static void distribute_links (struct insn_link *);
480 static void mark_used_regs_combine (rtx);
481 static void record_promoted_value (rtx_insn *, rtx);
482 static bool unmentioned_reg_p (rtx, rtx);
483 static void record_truncated_values (rtx *, void *);
484 static bool reg_truncated_to_mode (machine_mode, const_rtx);
485 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
486 \f
487
488 /* It is not safe to use ordinary gen_lowpart in combine.
489 See comments in gen_lowpart_for_combine. */
490 #undef RTL_HOOKS_GEN_LOWPART
491 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
492
493 /* Our implementation of gen_lowpart never emits a new pseudo. */
494 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
495 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
496
497 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
498 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
499
500 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
501 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
502
503 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
504 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
505
506 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
507
508 \f
509 /* Convenience wrapper for the canonicalize_comparison target hook.
510 Target hooks cannot use enum rtx_code. */
511 static inline void
512 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
513 bool op0_preserve_value)
514 {
515 int code_int = (int)*code;
516 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
517 *code = (enum rtx_code)code_int;
518 }
519
520 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
521 PATTERN cannot be split. Otherwise, it returns an insn sequence.
522 This is a wrapper around split_insns which ensures that the
523 reg_stat vector is made larger if the splitter creates a new
524 register. */
525
526 static rtx_insn *
527 combine_split_insns (rtx pattern, rtx_insn *insn)
528 {
529 rtx_insn *ret;
530 unsigned int nregs;
531
532 ret = split_insns (pattern, insn);
533 nregs = max_reg_num ();
534 if (nregs > reg_stat.length ())
535 reg_stat.safe_grow_cleared (nregs);
536 return ret;
537 }
538
539 /* This is used by find_single_use to locate an rtx in LOC that
540 contains exactly one use of DEST, which is typically either a REG
541 or CC0. It returns a pointer to the innermost rtx expression
542 containing DEST. Appearances of DEST that are being used to
543 totally replace it are not counted. */
544
545 static rtx *
546 find_single_use_1 (rtx dest, rtx *loc)
547 {
548 rtx x = *loc;
549 enum rtx_code code = GET_CODE (x);
550 rtx *result = NULL;
551 rtx *this_result;
552 int i;
553 const char *fmt;
554
555 switch (code)
556 {
557 case CONST:
558 case LABEL_REF:
559 case SYMBOL_REF:
560 CASE_CONST_ANY:
561 case CLOBBER:
562 return 0;
563
564 case SET:
565 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
566 of a REG that occupies all of the REG, the insn uses DEST if
567 it is mentioned in the destination or the source. Otherwise, we
568 just need to check the source. */
569 if (GET_CODE (SET_DEST (x)) != CC0
570 && GET_CODE (SET_DEST (x)) != PC
571 && !REG_P (SET_DEST (x))
572 && ! (GET_CODE (SET_DEST (x)) == SUBREG
573 && REG_P (SUBREG_REG (SET_DEST (x)))
574 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
575 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
576 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
577 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
578 break;
579
580 return find_single_use_1 (dest, &SET_SRC (x));
581
582 case MEM:
583 case SUBREG:
584 return find_single_use_1 (dest, &XEXP (x, 0));
585
586 default:
587 break;
588 }
589
590 /* If it wasn't one of the common cases above, check each expression and
591 vector of this code. Look for a unique usage of DEST. */
592
593 fmt = GET_RTX_FORMAT (code);
594 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
595 {
596 if (fmt[i] == 'e')
597 {
598 if (dest == XEXP (x, i)
599 || (REG_P (dest) && REG_P (XEXP (x, i))
600 && REGNO (dest) == REGNO (XEXP (x, i))))
601 this_result = loc;
602 else
603 this_result = find_single_use_1 (dest, &XEXP (x, i));
604
605 if (result == NULL)
606 result = this_result;
607 else if (this_result)
608 /* Duplicate usage. */
609 return NULL;
610 }
611 else if (fmt[i] == 'E')
612 {
613 int j;
614
615 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
616 {
617 if (XVECEXP (x, i, j) == dest
618 || (REG_P (dest)
619 && REG_P (XVECEXP (x, i, j))
620 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
621 this_result = loc;
622 else
623 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
624
625 if (result == NULL)
626 result = this_result;
627 else if (this_result)
628 return NULL;
629 }
630 }
631 }
632
633 return result;
634 }
635
636
637 /* See if DEST, produced in INSN, is used only a single time in the
638 sequel. If so, return a pointer to the innermost rtx expression in which
639 it is used.
640
641 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
642
643 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
644 care about REG_DEAD notes or LOG_LINKS.
645
646 Otherwise, we find the single use by finding an insn that has a
647 LOG_LINKS entry pointing at INSN and has a REG_DEAD note for DEST. If
648 DEST is only referenced once in that insn, we know that insn must be
649 the first and last insn referencing DEST. */
650
651 static rtx *
652 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
653 {
654 basic_block bb;
655 rtx_insn *next;
656 rtx *result;
657 struct insn_link *link;
658
659 if (dest == cc0_rtx)
660 {
661 next = NEXT_INSN (insn);
662 if (next == 0
663 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
664 return 0;
665
666 result = find_single_use_1 (dest, &PATTERN (next));
667 if (result && ploc)
668 *ploc = next;
669 return result;
670 }
671
672 if (!REG_P (dest))
673 return 0;
674
675 bb = BLOCK_FOR_INSN (insn);
676 for (next = NEXT_INSN (insn);
677 next && BLOCK_FOR_INSN (next) == bb;
678 next = NEXT_INSN (next))
679 if (INSN_P (next) && dead_or_set_p (next, dest))
680 {
681 FOR_EACH_LOG_LINK (link, next)
682 if (link->insn == insn && link->regno == REGNO (dest))
683 break;
684
685 if (link)
686 {
687 result = find_single_use_1 (dest, &PATTERN (next));
688 if (ploc)
689 *ploc = next;
690 return result;
691 }
692 }
693
694 return 0;
695 }
696 \f
697 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
698 insn. The substitution can be undone by undo_all. If INTO is already
699 set to NEWVAL, do not record this change. Because computing NEWVAL might
700 also call SUBST, we have to compute it before we put anything into
701 the undo table. */
702
703 static void
704 do_SUBST (rtx *into, rtx newval)
705 {
706 struct undo *buf;
707 rtx oldval = *into;
708
709 if (oldval == newval)
710 return;
711
712 /* We'd like to catch as many invalid transformations here as
713 possible. Unfortunately, there are way too many mode changes
714 that are perfectly valid, so we'd waste too much effort for
715 little gain doing the checks here. Focus on catching invalid
716 transformations involving integer constants. */
717 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
718 && CONST_INT_P (newval))
719 {
720 /* Sanity check that we're replacing oldval with a CONST_INT
721 that is a valid sign-extension for the original mode. */
722 gcc_assert (INTVAL (newval)
723 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
724
725 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
726 CONST_INT is not valid, because after the replacement, the
727 original mode would be gone. Unfortunately, we can't tell
728 when do_SUBST is called to replace the operand thereof, so we
729 perform this test on oldval instead, checking whether an
730 invalid replacement took place before we got here. */
731 gcc_assert (!(GET_CODE (oldval) == SUBREG
732 && CONST_INT_P (SUBREG_REG (oldval))));
733 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
734 && CONST_INT_P (XEXP (oldval, 0))));
735 }
736
737 if (undobuf.frees)
738 buf = undobuf.frees, undobuf.frees = buf->next;
739 else
740 buf = XNEW (struct undo);
741
742 buf->kind = UNDO_RTX;
743 buf->where.r = into;
744 buf->old_contents.r = oldval;
745 *into = newval;
746
747 buf->next = undobuf.undos, undobuf.undos = buf;
748 }
749
750 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
751
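/* For instance, the simplification routines later in this file replace an
   operand with a call such as

	SUBST (XEXP (x, 0), new_rtx);

   recording the old operand in undobuf so that undo_all can restore it if
   the tentative combination is eventually abandoned. */
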
752 /* Similar to SUBST, but NEWVAL is an int expression. Note that this is
753 not safe to use for substituting a HOST_WIDE_INT value (including the
754 value of a CONST_INT). */
755
756 static void
757 do_SUBST_INT (int *into, int newval)
758 {
759 struct undo *buf;
760 int oldval = *into;
761
762 if (oldval == newval)
763 return;
764
765 if (undobuf.frees)
766 buf = undobuf.frees, undobuf.frees = buf->next;
767 else
768 buf = XNEW (struct undo);
769
770 buf->kind = UNDO_INT;
771 buf->where.i = into;
772 buf->old_contents.i = oldval;
773 *into = newval;
774
775 buf->next = undobuf.undos, undobuf.undos = buf;
776 }
777
778 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
779
780 /* Similar to SUBST, but just substitute the mode. This is used when
781 changing the mode of a pseudo-register, so that any other
782 references to the entry in the regno_reg_rtx array will change as
783 well. */
784
785 static void
786 do_SUBST_MODE (rtx *into, machine_mode newval)
787 {
788 struct undo *buf;
789 machine_mode oldval = GET_MODE (*into);
790
791 if (oldval == newval)
792 return;
793
794 if (undobuf.frees)
795 buf = undobuf.frees, undobuf.frees = buf->next;
796 else
797 buf = XNEW (struct undo);
798
799 buf->kind = UNDO_MODE;
800 buf->where.r = into;
801 buf->old_contents.m = oldval;
802 adjust_reg_mode (*into, newval);
803
804 buf->next = undobuf.undos, undobuf.undos = buf;
805 }
806
807 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
808
809 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
810
811 static void
812 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
813 {
814 struct undo *buf;
815 struct insn_link * oldval = *into;
816
817 if (oldval == newval)
818 return;
819
820 if (undobuf.frees)
821 buf = undobuf.frees, undobuf.frees = buf->next;
822 else
823 buf = XNEW (struct undo);
824
825 buf->kind = UNDO_LINKS;
826 buf->where.l = into;
827 buf->old_contents.l = oldval;
828 *into = newval;
829
830 buf->next = undobuf.undos, undobuf.undos = buf;
831 }
832
833 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
834 \f
835 /* Subroutine of try_combine. Determine whether the replacement patterns
836 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
837 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
838 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
839 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
840 of all the instructions can be estimated and the replacements are more
841 expensive than the original sequence. */
842
843 static bool
844 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
845 rtx newpat, rtx newi2pat, rtx newotherpat)
846 {
847 int i0_cost, i1_cost, i2_cost, i3_cost;
848 int new_i2_cost, new_i3_cost;
849 int old_cost, new_cost;
850
851 /* Lookup the original insn_rtx_costs. */
852 i2_cost = INSN_COST (i2);
853 i3_cost = INSN_COST (i3);
854
855 if (i1)
856 {
857 i1_cost = INSN_COST (i1);
858 if (i0)
859 {
860 i0_cost = INSN_COST (i0);
861 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
862 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
863 }
864 else
865 {
866 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
867 ? i1_cost + i2_cost + i3_cost : 0);
868 i0_cost = 0;
869 }
870 }
871 else
872 {
873 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
874 i1_cost = i0_cost = 0;
875 }
876
877 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
878 correct that. */
879 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
880 old_cost -= i1_cost;
881
882
883 /* Calculate the replacement insn_rtx_costs. */
884 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
885 if (newi2pat)
886 {
887 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
888 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
889 ? new_i2_cost + new_i3_cost : 0;
890 }
891 else
892 {
893 new_cost = new_i3_cost;
894 new_i2_cost = 0;
895 }
896
897 if (undobuf.other_insn)
898 {
899 int old_other_cost, new_other_cost;
900
901 old_other_cost = INSN_COST (undobuf.other_insn);
902 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
903 if (old_other_cost > 0 && new_other_cost > 0)
904 {
905 old_cost += old_other_cost;
906 new_cost += new_other_cost;
907 }
908 else
909 old_cost = 0;
910 }
911
912 /* Disallow this combination if both new_cost and old_cost are greater than
913 zero, and new_cost is greater than old_cost. */
914 int reject = old_cost > 0 && new_cost > old_cost;
915
916 if (dump_file)
917 {
918 fprintf (dump_file, "%s combination of insns ",
919 reject ? "rejecting" : "allowing");
920 if (i0)
921 fprintf (dump_file, "%d, ", INSN_UID (i0));
922 if (i1 && INSN_UID (i1) != INSN_UID (i2))
923 fprintf (dump_file, "%d, ", INSN_UID (i1));
924 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
925
926 fprintf (dump_file, "original costs ");
927 if (i0)
928 fprintf (dump_file, "%d + ", i0_cost);
929 if (i1 && INSN_UID (i1) != INSN_UID (i2))
930 fprintf (dump_file, "%d + ", i1_cost);
931 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
932
933 if (newi2pat)
934 fprintf (dump_file, "replacement costs %d + %d = %d\n",
935 new_i2_cost, new_i3_cost, new_cost);
936 else
937 fprintf (dump_file, "replacement cost %d\n", new_cost);
938 }
939
940 if (reject)
941 return false;
942
943 /* Update the uid_insn_cost array with the replacement costs. */
944 INSN_COST (i2) = new_i2_cost;
945 INSN_COST (i3) = new_i3_cost;
946 if (i1)
947 {
948 INSN_COST (i1) = 0;
949 if (i0)
950 INSN_COST (i0) = 0;
951 }
952
953 return true;
954 }
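
/* For example, if I2 and I3 have insn_rtx_costs 4 and 4 (old_cost == 8) and
   the single replacement pattern costs 6, the combination is allowed; if the
   replacement instead cost 10, it would be rejected. Whenever a cost cannot
   be estimated (is zero), the comparison is skipped and the combination is
   allowed. */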
955
956
957 /* Delete any insns that copy a register to itself. */
958
959 static void
960 delete_noop_moves (void)
961 {
962 rtx_insn *insn, *next;
963 basic_block bb;
964
965 FOR_EACH_BB_FN (bb, cfun)
966 {
967 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
968 {
969 next = NEXT_INSN (insn);
970 if (INSN_P (insn) && noop_move_p (insn))
971 {
972 if (dump_file)
973 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
974
975 delete_insn_and_edges (insn);
976 }
977 }
978 }
979 }
980
981 \f
982 /* Return false if we do not want to (or cannot) combine DEF. */
983 static bool
984 can_combine_def_p (df_ref def)
985 {
986 /* Do not consider if it is pre/post modification in MEM. */
987 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
988 return false;
989
990 unsigned int regno = DF_REF_REGNO (def);
991
992 /* Do not combine frame pointer adjustments. */
993 if ((regno == FRAME_POINTER_REGNUM
994 && (!reload_completed || frame_pointer_needed))
995 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
996 && regno == HARD_FRAME_POINTER_REGNUM
997 && (!reload_completed || frame_pointer_needed))
998 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
999 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1000 return false;
1001
1002 return true;
1003 }
1004
1005 /* Return false if we do not want to (or cannot) combine USE. */
1006 static bool
1007 can_combine_use_p (df_ref use)
1008 {
1009 /* Do not consider the usage of the stack pointer by function call. */
1010 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1011 return false;
1012
1013 return true;
1014 }
1015
1016 /* Fill in log links field for all insns. */
1017
1018 static void
1019 create_log_links (void)
1020 {
1021 basic_block bb;
1022 rtx_insn **next_use;
1023 rtx_insn *insn;
1024 df_ref def, use;
1025
1026 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1027
1028 /* Pass through each block from the end, recording the uses of each
1029 register and establishing log links when def is encountered.
1030 Note that we do not clear next_use array in order to save time,
1031 so we have to test whether the use is in the same basic block as def.
1032
1033 There are a few cases below when we do not consider the definition or
1034 usage -- these are taken from what the original flow.c did. Don't ask me
1035 why it is done this way; I don't know and if it works, I don't want to know. */
1036
1037 FOR_EACH_BB_FN (bb, cfun)
1038 {
1039 FOR_BB_INSNS_REVERSE (bb, insn)
1040 {
1041 if (!NONDEBUG_INSN_P (insn))
1042 continue;
1043
1044 /* Log links are created only once. */
1045 gcc_assert (!LOG_LINKS (insn));
1046
1047 FOR_EACH_INSN_DEF (def, insn)
1048 {
1049 unsigned int regno = DF_REF_REGNO (def);
1050 rtx_insn *use_insn;
1051
1052 if (!next_use[regno])
1053 continue;
1054
1055 if (!can_combine_def_p (def))
1056 continue;
1057
1058 use_insn = next_use[regno];
1059 next_use[regno] = NULL;
1060
1061 if (BLOCK_FOR_INSN (use_insn) != bb)
1062 continue;
1063
1064 /* flow.c claimed:
1065
1066 We don't build a LOG_LINK for hard registers contained
1067 in ASM_OPERANDs. If these registers get replaced,
1068 we might wind up changing the semantics of the insn,
1069 even if reload can make what appear to be valid
1070 assignments later. */
1071 if (regno < FIRST_PSEUDO_REGISTER
1072 && asm_noperands (PATTERN (use_insn)) >= 0)
1073 continue;
1074
1075 /* Don't add duplicate links between instructions. */
1076 struct insn_link *links;
1077 FOR_EACH_LOG_LINK (links, use_insn)
1078 if (insn == links->insn && regno == links->regno)
1079 break;
1080
1081 if (!links)
1082 LOG_LINKS (use_insn)
1083 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1084 }
1085
1086 FOR_EACH_INSN_USE (use, insn)
1087 if (can_combine_use_p (use))
1088 next_use[DF_REF_REGNO (use)] = insn;
1089 }
1090 }
1091
1092 free (next_use);
1093 }
1094
1095 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1096 true if we found a LOG_LINK that proves that A feeds B. This only works
1097 if there are no instructions between A and B which could have a link
1098 depending on A, since in that case we would not record a link for B.
1099 We also check the implicit dependency created by a cc0 setter/user
1100 pair. */
1101
1102 static bool
1103 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1104 {
1105 struct insn_link *links;
1106 FOR_EACH_LOG_LINK (links, b)
1107 if (links->insn == a)
1108 return true;
1109 if (HAVE_cc0 && sets_cc0_p (a))
1110 return true;
1111 return false;
1112 }
1113 \f
1114 /* Main entry point for combiner. F is the first insn of the function.
1115 NREGS is the first unused pseudo-reg number.
1116
1117 Return nonzero if the combiner has turned an indirect jump
1118 instruction into a direct jump. */
1119 static int
1120 combine_instructions (rtx_insn *f, unsigned int nregs)
1121 {
1122 rtx_insn *insn, *next;
1123 rtx_insn *prev;
1124 struct insn_link *links, *nextlinks;
1125 rtx_insn *first;
1126 basic_block last_bb;
1127
1128 int new_direct_jump_p = 0;
1129
1130 for (first = f; first && !INSN_P (first); )
1131 first = NEXT_INSN (first);
1132 if (!first)
1133 return 0;
1134
1135 combine_attempts = 0;
1136 combine_merges = 0;
1137 combine_extras = 0;
1138 combine_successes = 0;
1139
1140 rtl_hooks = combine_rtl_hooks;
1141
1142 reg_stat.safe_grow_cleared (nregs);
1143
1144 init_recog_no_volatile ();
1145
1146 /* Allocate array for insn info. */
1147 max_uid_known = get_max_uid ();
1148 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1149 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1150 gcc_obstack_init (&insn_link_obstack);
1151
1152 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1153
1154 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1155 problems when, for example, we have j <<= 1 in a loop. */
1156
1157 nonzero_sign_valid = 0;
1158 label_tick = label_tick_ebb_start = 1;
1159
1160 /* Scan all SETs and see if we can deduce anything about what
1161 bits are known to be zero for some registers and how many copies
1162 of the sign bit are known to exist for those registers.
1163
1164 Also set any known values so that we can use it while searching
1165 for what bits are known to be set. */
1166
1167 setup_incoming_promotions (first);
1168 /* Allow the entry block and the first block to fall into the same EBB.
1169 Conceptually the incoming promotions are assigned to the entry block. */
1170 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1171
1172 create_log_links ();
1173 FOR_EACH_BB_FN (this_basic_block, cfun)
1174 {
1175 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1176 last_call_luid = 0;
1177 mem_last_set = -1;
1178
1179 label_tick++;
1180 if (!single_pred_p (this_basic_block)
1181 || single_pred (this_basic_block) != last_bb)
1182 label_tick_ebb_start = label_tick;
1183 last_bb = this_basic_block;
1184
1185 FOR_BB_INSNS (this_basic_block, insn)
1186 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1187 {
1188 rtx links;
1189
1190 subst_low_luid = DF_INSN_LUID (insn);
1191 subst_insn = insn;
1192
1193 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1194 insn);
1195 record_dead_and_set_regs (insn);
1196
1197 if (AUTO_INC_DEC)
1198 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1199 if (REG_NOTE_KIND (links) == REG_INC)
1200 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1201 insn);
1202
1203 /* Record the current insn_rtx_cost of this instruction. */
1204 if (NONJUMP_INSN_P (insn))
1205 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1206 optimize_this_for_speed_p);
1207 if (dump_file)
1208 fprintf (dump_file, "insn_cost %d: %d\n",
1209 INSN_UID (insn), INSN_COST (insn));
1210 }
1211 }
1212
1213 nonzero_sign_valid = 1;
1214
1215 /* Now scan all the insns in forward order. */
1216 label_tick = label_tick_ebb_start = 1;
1217 init_reg_last ();
1218 setup_incoming_promotions (first);
1219 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1220 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1221
1222 FOR_EACH_BB_FN (this_basic_block, cfun)
1223 {
1224 rtx_insn *last_combined_insn = NULL;
1225 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1226 last_call_luid = 0;
1227 mem_last_set = -1;
1228
1229 label_tick++;
1230 if (!single_pred_p (this_basic_block)
1231 || single_pred (this_basic_block) != last_bb)
1232 label_tick_ebb_start = label_tick;
1233 last_bb = this_basic_block;
1234
1235 rtl_profile_for_bb (this_basic_block);
1236 for (insn = BB_HEAD (this_basic_block);
1237 insn != NEXT_INSN (BB_END (this_basic_block));
1238 insn = next ? next : NEXT_INSN (insn))
1239 {
1240 next = 0;
1241 if (!NONDEBUG_INSN_P (insn))
1242 continue;
1243
1244 while (last_combined_insn
1245 && last_combined_insn->deleted ())
1246 last_combined_insn = PREV_INSN (last_combined_insn);
1247 if (last_combined_insn == NULL_RTX
1248 || BARRIER_P (last_combined_insn)
1249 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1250 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1251 last_combined_insn = insn;
1252
1253 /* See if we know about function return values before this
1254 insn based upon SUBREG flags. */
1255 check_promoted_subreg (insn, PATTERN (insn));
1256
1257 /* See if we can find hardregs and subreg of pseudos in
1258 narrower modes. This could help turning TRUNCATEs
1259 into SUBREGs. */
1260 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1261
1262 /* Try this insn with each insn it links back to. */
1263
1264 FOR_EACH_LOG_LINK (links, insn)
1265 if ((next = try_combine (insn, links->insn, NULL,
1266 NULL, &new_direct_jump_p,
1267 last_combined_insn)) != 0)
1268 {
1269 statistics_counter_event (cfun, "two-insn combine", 1);
1270 goto retry;
1271 }
1272
1273 /* Try each sequence of three linked insns ending with this one. */
1274
1275 if (max_combine >= 3)
1276 FOR_EACH_LOG_LINK (links, insn)
1277 {
1278 rtx_insn *link = links->insn;
1279
1280 /* If the linked insn has been replaced by a note, then there
1281 is no point in pursuing this chain any further. */
1282 if (NOTE_P (link))
1283 continue;
1284
1285 FOR_EACH_LOG_LINK (nextlinks, link)
1286 if ((next = try_combine (insn, link, nextlinks->insn,
1287 NULL, &new_direct_jump_p,
1288 last_combined_insn)) != 0)
1289 {
1290 statistics_counter_event (cfun, "three-insn combine", 1);
1291 goto retry;
1292 }
1293 }
1294
1295 /* Try to combine a jump insn that uses CC0
1296 with a preceding insn that sets CC0, and maybe with its
1297 logical predecessor as well.
1298 This is how we make decrement-and-branch insns.
1299 We need this special code because data flow connections
1300 via CC0 do not get entered in LOG_LINKS. */
1301
1302 if (HAVE_cc0
1303 && JUMP_P (insn)
1304 && (prev = prev_nonnote_insn (insn)) != 0
1305 && NONJUMP_INSN_P (prev)
1306 && sets_cc0_p (PATTERN (prev)))
1307 {
1308 if ((next = try_combine (insn, prev, NULL, NULL,
1309 &new_direct_jump_p,
1310 last_combined_insn)) != 0)
1311 goto retry;
1312
1313 FOR_EACH_LOG_LINK (nextlinks, prev)
1314 if ((next = try_combine (insn, prev, nextlinks->insn,
1315 NULL, &new_direct_jump_p,
1316 last_combined_insn)) != 0)
1317 goto retry;
1318 }
1319
1320 /* Do the same for an insn that explicitly references CC0. */
1321 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1322 && (prev = prev_nonnote_insn (insn)) != 0
1323 && NONJUMP_INSN_P (prev)
1324 && sets_cc0_p (PATTERN (prev))
1325 && GET_CODE (PATTERN (insn)) == SET
1326 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1327 {
1328 if ((next = try_combine (insn, prev, NULL, NULL,
1329 &new_direct_jump_p,
1330 last_combined_insn)) != 0)
1331 goto retry;
1332
1333 FOR_EACH_LOG_LINK (nextlinks, prev)
1334 if ((next = try_combine (insn, prev, nextlinks->insn,
1335 NULL, &new_direct_jump_p,
1336 last_combined_insn)) != 0)
1337 goto retry;
1338 }
1339
1340 /* Finally, see if any of the insns that this insn links to
1341 explicitly references CC0. If so, try this insn, that insn,
1342 and its predecessor if it sets CC0. */
1343 if (HAVE_cc0)
1344 {
1345 FOR_EACH_LOG_LINK (links, insn)
1346 if (NONJUMP_INSN_P (links->insn)
1347 && GET_CODE (PATTERN (links->insn)) == SET
1348 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1349 && (prev = prev_nonnote_insn (links->insn)) != 0
1350 && NONJUMP_INSN_P (prev)
1351 && sets_cc0_p (PATTERN (prev))
1352 && (next = try_combine (insn, links->insn,
1353 prev, NULL, &new_direct_jump_p,
1354 last_combined_insn)) != 0)
1355 goto retry;
1356 }
1357
1358 /* Try combining an insn with two different insns whose results it
1359 uses. */
1360 if (max_combine >= 3)
1361 FOR_EACH_LOG_LINK (links, insn)
1362 for (nextlinks = links->next; nextlinks;
1363 nextlinks = nextlinks->next)
1364 if ((next = try_combine (insn, links->insn,
1365 nextlinks->insn, NULL,
1366 &new_direct_jump_p,
1367 last_combined_insn)) != 0)
1368
1369 {
1370 statistics_counter_event (cfun, "three-insn combine", 1);
1371 goto retry;
1372 }
1373
1374 /* Try four-instruction combinations. */
1375 if (max_combine >= 4)
1376 FOR_EACH_LOG_LINK (links, insn)
1377 {
1378 struct insn_link *next1;
1379 rtx_insn *link = links->insn;
1380
1381 /* If the linked insn has been replaced by a note, then there
1382 is no point in pursuing this chain any further. */
1383 if (NOTE_P (link))
1384 continue;
1385
1386 FOR_EACH_LOG_LINK (next1, link)
1387 {
1388 rtx_insn *link1 = next1->insn;
1389 if (NOTE_P (link1))
1390 continue;
1391 /* I0 -> I1 -> I2 -> I3. */
1392 FOR_EACH_LOG_LINK (nextlinks, link1)
1393 if ((next = try_combine (insn, link, link1,
1394 nextlinks->insn,
1395 &new_direct_jump_p,
1396 last_combined_insn)) != 0)
1397 {
1398 statistics_counter_event (cfun, "four-insn combine", 1);
1399 goto retry;
1400 }
1401 /* I0, I1 -> I2, I2 -> I3. */
1402 for (nextlinks = next1->next; nextlinks;
1403 nextlinks = nextlinks->next)
1404 if ((next = try_combine (insn, link, link1,
1405 nextlinks->insn,
1406 &new_direct_jump_p,
1407 last_combined_insn)) != 0)
1408 {
1409 statistics_counter_event (cfun, "four-insn combine", 1);
1410 goto retry;
1411 }
1412 }
1413
1414 for (next1 = links->next; next1; next1 = next1->next)
1415 {
1416 rtx_insn *link1 = next1->insn;
1417 if (NOTE_P (link1))
1418 continue;
1419 /* I0 -> I2; I1, I2 -> I3. */
1420 FOR_EACH_LOG_LINK (nextlinks, link)
1421 if ((next = try_combine (insn, link, link1,
1422 nextlinks->insn,
1423 &new_direct_jump_p,
1424 last_combined_insn)) != 0)
1425 {
1426 statistics_counter_event (cfun, "four-insn combine", 1);
1427 goto retry;
1428 }
1429 /* I0 -> I1; I1, I2 -> I3. */
1430 FOR_EACH_LOG_LINK (nextlinks, link1)
1431 if ((next = try_combine (insn, link, link1,
1432 nextlinks->insn,
1433 &new_direct_jump_p,
1434 last_combined_insn)) != 0)
1435 {
1436 statistics_counter_event (cfun, "four-insn combine", 1);
1437 goto retry;
1438 }
1439 }
1440 }
1441
1442 /* Try this insn with each REG_EQUAL note it links back to. */
1443 FOR_EACH_LOG_LINK (links, insn)
1444 {
1445 rtx set, note;
1446 rtx_insn *temp = links->insn;
1447 if ((set = single_set (temp)) != 0
1448 && (note = find_reg_equal_equiv_note (temp)) != 0
1449 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1450 /* Avoid using a register that may already have been marked
1451 dead by an earlier instruction. */
1452 && ! unmentioned_reg_p (note, SET_SRC (set))
1453 && (GET_MODE (note) == VOIDmode
1454 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1455 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1456 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1457 || (GET_MODE (XEXP (SET_DEST (set), 0))
1458 == GET_MODE (note))))))
1459 {
1460 /* Temporarily replace the set's source with the
1461 contents of the REG_EQUAL note. The insn will
1462 be deleted or recognized by try_combine. */
1463 rtx orig_src = SET_SRC (set);
1464 rtx orig_dest = SET_DEST (set);
1465 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1466 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1467 SET_SRC (set) = note;
1468 i2mod = temp;
1469 i2mod_old_rhs = copy_rtx (orig_src);
1470 i2mod_new_rhs = copy_rtx (note);
1471 next = try_combine (insn, i2mod, NULL, NULL,
1472 &new_direct_jump_p,
1473 last_combined_insn);
1474 i2mod = NULL;
1475 if (next)
1476 {
1477 statistics_counter_event (cfun, "insn-with-note combine", 1);
1478 goto retry;
1479 }
1480 SET_SRC (set) = orig_src;
1481 SET_DEST (set) = orig_dest;
1482 }
1483 }
1484
1485 if (!NOTE_P (insn))
1486 record_dead_and_set_regs (insn);
1487
1488 retry:
1489 ;
1490 }
1491 }
1492
1493 default_rtl_profile ();
1494 clear_bb_flags ();
1495 new_direct_jump_p |= purge_all_dead_edges ();
1496 delete_noop_moves ();
1497
1498 /* Clean up. */
1499 obstack_free (&insn_link_obstack, NULL);
1500 free (uid_log_links);
1501 free (uid_insn_cost);
1502 reg_stat.release ();
1503
1504 {
1505 struct undo *undo, *next;
1506 for (undo = undobuf.frees; undo; undo = next)
1507 {
1508 next = undo->next;
1509 free (undo);
1510 }
1511 undobuf.frees = 0;
1512 }
1513
1514 total_attempts += combine_attempts;
1515 total_merges += combine_merges;
1516 total_extras += combine_extras;
1517 total_successes += combine_successes;
1518
1519 nonzero_sign_valid = 0;
1520 rtl_hooks = general_rtl_hooks;
1521
1522 /* Make recognizer allow volatile MEMs again. */
1523 init_recog ();
1524
1525 return new_direct_jump_p;
1526 }
1527
1528 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1529
1530 static void
1531 init_reg_last (void)
1532 {
1533 unsigned int i;
1534 reg_stat_type *p;
1535
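/* Clearing only up to offsetof (reg_stat_type, sign_bit_copies) wipes the
   last_death, last_set and last_set_* bookkeeping while preserving the
   accumulated sign_bit_copies, nonzero_bits and truncation information. */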
1536 FOR_EACH_VEC_ELT (reg_stat, i, p)
1537 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1538 }
1539 \f
1540 /* Set up any promoted values for incoming argument registers. */
1541
1542 static void
1543 setup_incoming_promotions (rtx_insn *first)
1544 {
1545 tree arg;
1546 bool strictly_local = false;
1547
1548 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1549 arg = DECL_CHAIN (arg))
1550 {
1551 rtx x, reg = DECL_INCOMING_RTL (arg);
1552 int uns1, uns3;
1553 machine_mode mode1, mode2, mode3, mode4;
1554
1555 /* Only continue if the incoming argument is in a register. */
1556 if (!REG_P (reg))
1557 continue;
1558
1559 /* Determine, if possible, whether all call sites of the current
1560 function lie within the current compilation unit. (This does
1561 take into account the exporting of a function via taking its
1562 address, and so forth.) */
1563 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1564
1565 /* The mode and signedness of the argument before any promotions happen
1566 (equal to the mode of the pseudo holding it at that stage). */
1567 mode1 = TYPE_MODE (TREE_TYPE (arg));
1568 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1569
1570 /* The mode and signedness of the argument after any source language and
1571 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1572 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1573 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1574
1575 /* The mode and signedness of the argument as it is actually passed,
1576 see assign_parm_setup_reg in function.c. */
1577 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1578 TREE_TYPE (cfun->decl), 0);
1579
1580 /* The mode of the register in which the argument is being passed. */
1581 mode4 = GET_MODE (reg);
1582
1583 /* Eliminate sign extensions in the callee when:
1584 (a) A mode promotion has occurred; */
1585 if (mode1 == mode3)
1586 continue;
1587 /* (b) The mode of the register is the same as the mode of
1588 the argument as it is passed; */
1589 if (mode3 != mode4)
1590 continue;
1591 /* (c) There's no language level extension; */
1592 if (mode1 == mode2)
1593 ;
1594 /* (c.1) All callers are from the current compilation unit. If that's
1595 the case we don't have to rely on an ABI, we only have to know
1596 what we're generating right now, and we know that we will do the
1597 mode1 to mode2 promotion with the given sign. */
1598 else if (!strictly_local)
1599 continue;
1600 /* (c.2) The combination of the two promotions is useful. This is
1601 true when the signs match, or if the first promotion is unsigned.
1602 In the latter case, (sign_extend (zero_extend x)) is the same as
1603 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1604 else if (uns1)
1605 uns3 = true;
1606 else if (uns3)
1607 continue;
1608
1609 /* Record that the value was promoted from mode1 to mode3,
1610 so that any sign extension at the head of the current
1611 function may be eliminated. */
1612 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1613 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1614 record_value_for_reg (reg, first, x);
1615 }
1616 }
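
/* So, for example, an argument whose TYPE_MODE is HImode but which is passed
   sign-extended in an SImode register is recorded as

	(sign_extend:SI (clobber:HI (const_int 0)))

   i.e. "some unknown HImode value, sign extended to SImode". */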
1617
1618 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1619 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1620 because some machines (maybe most) will actually do the sign-extension and
1621 this is the conservative approach.
1622
1623 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1624 kludge. */
1625
1626 static rtx
1627 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1628 {
1629 if (GET_MODE_PRECISION (mode) < prec
1630 && CONST_INT_P (src)
1631 && INTVAL (src) > 0
1632 && val_signbit_known_set_p (mode, INTVAL (src)))
1633 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));
1634
1635 return src;
1636 }
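
/* For example, with MODE == QImode and PREC == BITS_PER_WORD, a CONST_INT
   of 0x80 has its QImode sign bit set, so it is rewritten as 0x80 | ~0xff,
   i.e. the sign-extended value -128, before being handed to nonzero_bits. */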
1637
1638 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1639 and SET. */
1640
1641 static void
1642 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1643 rtx x)
1644 {
1645 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1646 unsigned HOST_WIDE_INT bits = 0;
1647 rtx reg_equal = NULL, src = SET_SRC (set);
1648 unsigned int num = 0;
1649
1650 if (reg_equal_note)
1651 reg_equal = XEXP (reg_equal_note, 0);
1652
1653 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1654 {
1655 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1656 if (reg_equal)
1657 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1658 }
1659
1660 /* Don't call nonzero_bits if it cannot change anything. */
1661 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1662 {
1663 bits = nonzero_bits (src, nonzero_bits_mode);
1664 if (reg_equal && bits)
1665 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1666 rsp->nonzero_bits |= bits;
1667 }
1668
1669 /* Don't call num_sign_bit_copies if it cannot change anything. */
1670 if (rsp->sign_bit_copies != 1)
1671 {
1672 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1673 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1674 {
1675 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1676 if (num == 0 || numeq > num)
1677 num = numeq;
1678 }
1679 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1680 rsp->sign_bit_copies = num;
1681 }
1682 }
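
/* For example, if nonzero_bits reports that the SET_SRC may have bits 0xff
   set while the REG_EQUAL note's expression can only have bits 0x0f set,
   only 0x0f is ORed into the register's recorded nonzero_bits, since both
   expressions describe the same value. */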
1683
1684 /* Called via note_stores. If X is a pseudo that is narrower than
1685 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1686
1687 If we are setting only a portion of X and we can't figure out what
1688 portion, assume all bits will be used since we don't know what will
1689 be happening.
1690
1691 Similarly, set how many bits of X are known to be copies of the sign bit
1692 at all locations in the function. This is the smallest number implied
1693 by any set of X. */
1694
1695 static void
1696 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1697 {
1698 rtx_insn *insn = (rtx_insn *) data;
1699
1700 if (REG_P (x)
1701 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1702 /* If this register is undefined at the start of the function, we can't
1703 say what its contents were. */
1704 && ! REGNO_REG_SET_P
1705 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1706 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1707 {
1708 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1709
1710 if (set == 0 || GET_CODE (set) == CLOBBER)
1711 {
1712 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1713 rsp->sign_bit_copies = 1;
1714 return;
1715 }
1716
1717 /* If this register is being initialized using itself, and the
1718 register is uninitialized in this basic block, and there are
1719 no LOG_LINKS which set the register, then part of the
1720 register is uninitialized. In that case we can't assume
1721 anything about the number of nonzero bits.
1722
1723 ??? We could do better if we checked this in
1724 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1725 could avoid making assumptions about the insn which initially
1726 sets the register, while still using the information in other
1727 insns. We would have to be careful to check every insn
1728 involved in the combination. */
1729
1730 if (insn
1731 && reg_referenced_p (x, PATTERN (insn))
1732 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1733 REGNO (x)))
1734 {
1735 struct insn_link *link;
1736
1737 FOR_EACH_LOG_LINK (link, insn)
1738 if (dead_or_set_p (link->insn, x))
1739 break;
1740 if (!link)
1741 {
1742 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1743 rsp->sign_bit_copies = 1;
1744 return;
1745 }
1746 }
1747
1748 /* If this is a complex assignment, see if we can convert it into a
1749 simple assignment. */
1750 set = expand_field_assignment (set);
1751
1752 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1753 set what we know about X. */
1754
1755 if (SET_DEST (set) == x
1756 || (paradoxical_subreg_p (SET_DEST (set))
1757 && SUBREG_REG (SET_DEST (set)) == x))
1758 update_rsp_from_reg_equal (rsp, insn, set, x);
1759 else
1760 {
1761 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1762 rsp->sign_bit_copies = 1;
1763 }
1764 }
1765 }
1766 \f
1767 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1768 optionally insns that were previously combined into I3 or that will be
1769 combined into the merger of INSN and I3. The order is PRED, PRED2,
1770 INSN, SUCC, SUCC2, I3.
1771
1772 Return 0 if the combination is not allowed for any reason.
1773
1774 If the combination is allowed, *PDEST will be set to the single
1775 destination of INSN and *PSRC to the single source, and this function
1776 will return 1. */
1777
1778 static int
1779 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1780 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1781 rtx *pdest, rtx *psrc)
1782 {
1783 int i;
1784 const_rtx set = 0;
1785 rtx src, dest;
1786 rtx_insn *p;
1787 rtx link;
1788 bool all_adjacent = true;
1789 int (*is_volatile_p) (const_rtx);
1790
1791 if (succ)
1792 {
1793 if (succ2)
1794 {
1795 if (next_active_insn (succ2) != i3)
1796 all_adjacent = false;
1797 if (next_active_insn (succ) != succ2)
1798 all_adjacent = false;
1799 }
1800 else if (next_active_insn (succ) != i3)
1801 all_adjacent = false;
1802 if (next_active_insn (insn) != succ)
1803 all_adjacent = false;
1804 }
1805 else if (next_active_insn (insn) != i3)
1806 all_adjacent = false;
1807
1808 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1809 or a PARALLEL consisting of such a SET and CLOBBERs.
1810
1811 If INSN has CLOBBER parallel parts, ignore them for our processing.
1812 By definition, these happen during the execution of the insn. When it
1813 is merged with another insn, all bets are off. If they are, in fact,
1814 needed and aren't also supplied in I3, they may be added by
1815 recog_for_combine. Otherwise, it won't match.
1816
1817 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1818 note.
1819
1820 Get the source and destination of INSN. If more than one, can't
1821 combine. */
1822
1823 if (GET_CODE (PATTERN (insn)) == SET)
1824 set = PATTERN (insn);
1825 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1826 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1827 {
1828 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1829 {
1830 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1831
1832 switch (GET_CODE (elt))
1833 {
1834 /* This is important to combine floating point insns
1835 for the SH4 port. */
1836 case USE:
1837 /* Combining an isolated USE doesn't make sense.
1838 We depend here on combinable_i3pat to reject them. */
1839 /* The code below this loop only verifies that the inputs of
1840 the SET in INSN do not change. We call reg_set_between_p
1841 to verify that the REG in the USE does not change between
1842 I3 and INSN.
1843 If the USE in INSN was for a pseudo register, the matching
1844 insn pattern will likely match any register; combining this
1845 with any other USE would only be safe if we knew that the
1846 used registers have identical values, or if there was
1847 something to tell them apart, e.g. different modes. For
1848 now, we forgo such complicated tests and simply disallow
1849 combining of USEs of pseudo registers with any other USE. */
1850 if (REG_P (XEXP (elt, 0))
1851 && GET_CODE (PATTERN (i3)) == PARALLEL)
1852 {
1853 rtx i3pat = PATTERN (i3);
1854 int i = XVECLEN (i3pat, 0) - 1;
1855 unsigned int regno = REGNO (XEXP (elt, 0));
1856
1857 do
1858 {
1859 rtx i3elt = XVECEXP (i3pat, 0, i);
1860
1861 if (GET_CODE (i3elt) == USE
1862 && REG_P (XEXP (i3elt, 0))
1863 && (REGNO (XEXP (i3elt, 0)) == regno
1864 ? reg_set_between_p (XEXP (elt, 0),
1865 PREV_INSN (insn), i3)
1866 : regno >= FIRST_PSEUDO_REGISTER))
1867 return 0;
1868 }
1869 while (--i >= 0);
1870 }
1871 break;
1872
1873 /* We can ignore CLOBBERs. */
1874 case CLOBBER:
1875 break;
1876
1877 case SET:
1878 /* Ignore SETs whose result isn't used but not those that
1879 have side-effects. */
1880 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1881 && insn_nothrow_p (insn)
1882 && !side_effects_p (elt))
1883 break;
1884
1885 /* If we have already found a SET, this is a second one and
1886 so we cannot combine with this insn. */
1887 if (set)
1888 return 0;
1889
1890 set = elt;
1891 break;
1892
1893 default:
1894 /* Anything else means we can't combine. */
1895 return 0;
1896 }
1897 }
1898
1899 if (set == 0
1900 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1901 so don't do anything with it. */
1902 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1903 return 0;
1904 }
1905 else
1906 return 0;
1907
1908 if (set == 0)
1909 return 0;
1910
1911 /* The simplification in expand_field_assignment may call back to
1912 get_last_value, so set a safeguard here. */
1913 subst_low_luid = DF_INSN_LUID (insn);
1914
1915 set = expand_field_assignment (set);
1916 src = SET_SRC (set), dest = SET_DEST (set);
1917
1918 /* Do not eliminate a user-specified register if it is in an
1919 asm input, because we may break the register asm usage defined
1920 in the GCC manual if we allow doing so.
1921 Be aware that this may cover more cases than we expect, but this
1922 should be harmless. */
1923 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1924 && extract_asm_operands (PATTERN (i3)))
1925 return 0;
1926
1927 /* Don't eliminate a store in the stack pointer. */
1928 if (dest == stack_pointer_rtx
1929 /* Don't combine with an insn that sets a register to itself if it has
1930 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1931 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1932 /* Can't merge an ASM_OPERANDS. */
1933 || GET_CODE (src) == ASM_OPERANDS
1934 /* Can't merge a function call. */
1935 || GET_CODE (src) == CALL
1936 /* Don't eliminate a function call argument. */
1937 || (CALL_P (i3)
1938 && (find_reg_fusage (i3, USE, dest)
1939 || (REG_P (dest)
1940 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1941 && global_regs[REGNO (dest)])))
1942 /* Don't substitute into an incremented register. */
1943 || FIND_REG_INC_NOTE (i3, dest)
1944 || (succ && FIND_REG_INC_NOTE (succ, dest))
1945 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1946 /* Don't substitute into a non-local goto, this confuses CFG. */
1947 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1948 /* Make sure that DEST is not used after SUCC but before I3. */
1949 || (!all_adjacent
1950 && ((succ2
1951 && (reg_used_between_p (dest, succ2, i3)
1952 || reg_used_between_p (dest, succ, succ2)))
1953 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1954 /* Make sure that the value that is to be substituted for the register
1955 does not use any registers whose values alter in between. However,
1956 if the insns are adjacent, a use can't cross a set even though we
1957 think it might (this can happen for a sequence of insns each setting
1958 the same destination; last_set of that register might point to
1959 a NOTE). If INSN has a REG_EQUIV note, the register is always
1960 equivalent to the memory so the substitution is valid even if there
1961 are intervening stores. Also, don't move a volatile asm or
1962 UNSPEC_VOLATILE across any other insns. */
1963 || (! all_adjacent
1964 && (((!MEM_P (src)
1965 || ! find_reg_note (insn, REG_EQUIV, src))
1966 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1967 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1968 || GET_CODE (src) == UNSPEC_VOLATILE))
1969 /* Don't combine across a CALL_INSN, because that would possibly
1970 change whether the life span of some REGs crosses calls or not,
1971 and it is a pain to update that information.
1972 Exception: if source is a constant, moving it later can't hurt.
1973 Accept that as a special case. */
1974 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1975 return 0;
1976
1977 /* DEST must either be a REG or CC0. */
1978 if (REG_P (dest))
1979 {
1980 /* If register alignment is being enforced for multi-word items in all
1981 cases except for parameters, it is possible to have a register copy
1982 insn referencing a hard register that is not allowed to contain the
1983 mode being copied and which would not be valid as an operand of most
1984 insns. Eliminate this problem by not combining with such an insn.
1985
1986 Also, on some machines we don't want to extend the life of a hard
1987 register. */
1988
1989 if (REG_P (src)
1990 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1991 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1992 /* Don't extend the life of a hard register unless it is
1993 user variable (if we have few registers) or it can't
1994 fit into the desired register (meaning something special
1995 is going on).
1996 Also avoid substituting a return register into I3, because
1997 reload can't handle a conflict with constraints of other
1998 inputs. */
1999 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2000 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
2001 return 0;
2002 }
2003 else if (GET_CODE (dest) != CC0)
2004 return 0;
2005
2007 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2008 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2009 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2010 {
2011 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2012
2013 /* If the clobber represents an earlyclobber operand, we must not
2014 substitute an expression containing the clobbered register.
2015 As we do not analyze the constraint strings here, we have to
2016 make the conservative assumption. However, if the register is
2017 a fixed hard reg, the clobber cannot represent any operand;
2018 we leave it up to the machine description to either accept or
2019 reject use-and-clobber patterns. */
2020 if (!REG_P (reg)
2021 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2022 || !fixed_regs[REGNO (reg)])
2023 if (reg_overlap_mentioned_p (reg, src))
2024 return 0;
2025 }
2026
2027 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2028 or not), reject, unless nothing volatile comes between it and I3. */
2029
2030 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2031 {
2032 /* Make sure neither succ nor succ2 contains a volatile reference. */
2033 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2034 return 0;
2035 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2036 return 0;
2037 /* We'll check insns between INSN and I3 below. */
2038 }
2039
2040 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2041 to be an explicit register variable, and was chosen for a reason. */
2042
2043 if (GET_CODE (src) == ASM_OPERANDS
2044 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2045 return 0;
2046
2047 /* If INSN contains volatile references (specifically volatile MEMs),
2048 we cannot combine across any other volatile references.
2049 Even if INSN doesn't contain volatile references, any intervening
2050 volatile insn might affect machine state. */
2051
2052 is_volatile_p = volatile_refs_p (PATTERN (insn))
2053 ? volatile_refs_p
2054 : volatile_insn_p;
2055
2056 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2057 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2058 return 0;
2059
2060 /* If INSN contains an autoincrement or autodecrement, make sure that
2061 register is not used between there and I3, and not already used in
2062 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2063 Also insist that I3 not be a jump; if it were one
2064 and the incremented register were spilled, we would lose. */
2065
2066 if (AUTO_INC_DEC)
2067 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2068 if (REG_NOTE_KIND (link) == REG_INC
2069 && (JUMP_P (i3)
2070 || reg_used_between_p (XEXP (link, 0), insn, i3)
2071 || (pred != NULL_RTX
2072 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2073 || (pred2 != NULL_RTX
2074 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2075 || (succ != NULL_RTX
2076 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2077 || (succ2 != NULL_RTX
2078 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2079 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2080 return 0;
2081
2082 /* Don't combine an insn that follows a CC0-setting insn.
2083 An insn that uses CC0 must not be separated from the one that sets it.
2084 We do, however, allow I2 to follow a CC0-setting insn if that insn
2085 is passed as I1; in that case it will be deleted also.
2086 We also allow combining in this case if all the insns are adjacent
2087 because that would leave the two CC0 insns adjacent as well.
2088 It would be more logical to test whether CC0 occurs inside I1 or I2,
2089 but that would be much slower, and this ought to be equivalent. */
2090
2091 if (HAVE_cc0)
2092 {
2093 p = prev_nonnote_insn (insn);
2094 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2095 && ! all_adjacent)
2096 return 0;
2097 }
2098
2099 /* If we get here, we have passed all the tests and the combination is
2100 to be allowed. */
2101
2102 *pdest = dest;
2103 *psrc = src;
2104
2105 return 1;
2106 }
2107 \f
2108 /* LOC is the location within I3 that contains its pattern or the component
2109 of a PARALLEL of the pattern. We validate that it is valid for combining.
2110
2111 One problem is if I3 modifies its output, as opposed to replacing it
2112 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2113 doing so would produce an insn that is not equivalent to the original insns.
2114
2115 Consider:
2116
2117 (set (reg:DI 101) (reg:DI 100))
2118 (set (subreg:SI (reg:DI 101) 0) <foo>)
2119
2120 This is NOT equivalent to:
2121
2122 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2123 (set (reg:DI 101) (reg:DI 100))])
2124
2125 Not only does this modify 100 (in which case it might still be valid
2126 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2127
2128 We can also run into a problem if I2 sets a register that I1
2129 uses and I1 gets directly substituted into I3 (not via I2). In that
2130 case, we would be getting the wrong value of I2DEST into I3, so we
2131 must reject the combination. This case occurs when I2 and I1 both
2132 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2133 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2134 of a SET must prevent combination from occurring. The same situation
2135 can occur for I0, in which case I0_NOT_IN_SRC is set.
2136
2137 Before doing the above check, we first try to expand a field assignment
2138 into a set of logical operations.
2139
2140 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2141 we place a register that is both set and used within I3. If more than one
2142 such register is detected, we fail.
2143
2144 Return 1 if the combination is valid, zero otherwise. */
2145
2146 static int
2147 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2148 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2149 {
2150 rtx x = *loc;
2151
2152 if (GET_CODE (x) == SET)
2153 {
2154 rtx set = x;
2155 rtx dest = SET_DEST (set);
2156 rtx src = SET_SRC (set);
2157 rtx inner_dest = dest;
2158 rtx subdest;
2159
2160 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2161 || GET_CODE (inner_dest) == SUBREG
2162 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2163 inner_dest = XEXP (inner_dest, 0);
2164
2165 /* Check for the case where I3 modifies its output, as discussed
2166 above. We don't want to prevent pseudos from being combined
2167 into the address of a MEM, so only prevent the combination if
2168 i1 or i2 set the same MEM. */
2169 if ((inner_dest != dest &&
2170 (!MEM_P (inner_dest)
2171 || rtx_equal_p (i2dest, inner_dest)
2172 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2173 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2174 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2175 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2176 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2177
2178 /* This is the same test done in can_combine_p except we can't test
2179 all_adjacent; we don't have to, since this instruction will stay
2180 in place, thus we are not considering increasing the lifetime of
2181 INNER_DEST.
2182
2183 Also, if this insn sets a function argument, combining it with
2184 something that might need a spill could clobber a previous
2185 function argument; the all_adjacent test in can_combine_p also
2186 checks this; here, we do a more specific test for this case. */
2187
2188 || (REG_P (inner_dest)
2189 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2190 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2191 GET_MODE (inner_dest))))
2192 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2193 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2194 return 0;
2195
2196 /* If DEST is used in I3, it is being killed in this insn, so
2197 record that for later. We have to consider paradoxical
2198 subregs here, since they kill the whole register, but we
2199 ignore partial subregs, STRICT_LOW_PART, etc.
2200 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2201 STACK_POINTER_REGNUM, since these are always considered to be
2202 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2203 subdest = dest;
2204 if (GET_CODE (subdest) == SUBREG
2205 && (GET_MODE_SIZE (GET_MODE (subdest))
2206 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2207 subdest = SUBREG_REG (subdest);
2208 if (pi3dest_killed
2209 && REG_P (subdest)
2210 && reg_referenced_p (subdest, PATTERN (i3))
2211 && REGNO (subdest) != FRAME_POINTER_REGNUM
2212 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2213 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2214 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2215 || (REGNO (subdest) != ARG_POINTER_REGNUM
2216 || ! fixed_regs [REGNO (subdest)]))
2217 && REGNO (subdest) != STACK_POINTER_REGNUM)
2218 {
2219 if (*pi3dest_killed)
2220 return 0;
2221
2222 *pi3dest_killed = subdest;
2223 }
2224 }
2225
2226 else if (GET_CODE (x) == PARALLEL)
2227 {
2228 int i;
2229
2230 for (i = 0; i < XVECLEN (x, 0); i++)
2231 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2232 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2233 return 0;
2234 }
2235
2236 return 1;
2237 }
2238 \f
2239 /* Return 1 if X is an arithmetic expression that contains a multiplication
2240 or division. We don't count multiplications by powers of two here. */
2241
2242 static int
2243 contains_muldiv (rtx x)
2244 {
2245 switch (GET_CODE (x))
2246 {
2247 case MOD: case DIV: case UMOD: case UDIV:
2248 return 1;
2249
2250 case MULT:
2251 return ! (CONST_INT_P (XEXP (x, 1))
2252 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2253 default:
2254 if (BINARY_P (x))
2255 return contains_muldiv (XEXP (x, 0))
2256 || contains_muldiv (XEXP (x, 1));
2257
2258 if (UNARY_P (x))
2259 return contains_muldiv (XEXP (x, 0));
2260
2261 return 0;
2262 }
2263 }
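
/* For instance (hypothetical RTL, shown only as an illustration),
   (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101)) is not
   counted because the multiplication by 4 is really a shift, whereas
   (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102)) or any
   expression containing a DIV, UDIV, MOD or UMOD is.  */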
2264 \f
2265 /* Determine whether INSN can be used in a combination. Return nonzero if
2266 not. This is used in try_combine to detect early some cases where we
2267 can't perform combinations. */
2268
2269 static int
2270 cant_combine_insn_p (rtx_insn *insn)
2271 {
2272 rtx set;
2273 rtx src, dest;
2274
2275 /* If this isn't really an insn, we can't do anything.
2276 This can occur when flow deletes an insn that it has merged into an
2277 auto-increment address. */
2278 if (! INSN_P (insn))
2279 return 1;
2280
2281 /* Never combine loads and stores involving hard regs that are likely
2282 to be spilled. The register allocator can usually handle such
2283 reg-reg moves by tying. If we allow the combiner to make
2284 substitutions of likely-spilled regs, reload might die.
2285 As an exception, we allow combinations involving fixed regs; these are
2286 not available to the register allocator so there's no risk involved. */
2287
2288 set = single_set (insn);
2289 if (! set)
2290 return 0;
2291 src = SET_SRC (set);
2292 dest = SET_DEST (set);
2293 if (GET_CODE (src) == SUBREG)
2294 src = SUBREG_REG (src);
2295 if (GET_CODE (dest) == SUBREG)
2296 dest = SUBREG_REG (dest);
2297 if (REG_P (src) && REG_P (dest)
2298 && ((HARD_REGISTER_P (src)
2299 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2300 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2301 || (HARD_REGISTER_P (dest)
2302 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2303 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2304 return 1;
2305
2306 return 0;
2307 }
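
/* As an example of the above, on a hypothetical target whose
   argument-passing registers belong to a class for which
   targetm.class_likely_spilled_p returns true, an argument-setup copy
   such as (set (reg:SI <arg-reg>) (reg:SI 200)) is rejected here, while
   the same copy into or out of a fixed register is not.  */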
2308
2309 struct likely_spilled_retval_info
2310 {
2311 unsigned regno, nregs;
2312 unsigned mask;
2313 };
2314
2315 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2316 hard registers that are known to be written to / clobbered in full. */
2317 static void
2318 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2319 {
2320 struct likely_spilled_retval_info *const info =
2321 (struct likely_spilled_retval_info *) data;
2322 unsigned regno, nregs;
2323 unsigned new_mask;
2324
2325 if (!REG_P (XEXP (set, 0)))
2326 return;
2327 regno = REGNO (x);
2328 if (regno >= info->regno + info->nregs)
2329 return;
2330 nregs = REG_NREGS (x);
2331 if (regno + nregs <= info->regno)
2332 return;
2333 new_mask = (2U << (nregs - 1)) - 1;
2334 if (regno < info->regno)
2335 new_mask >>= info->regno - regno;
2336 else
2337 new_mask <<= regno - info->regno;
2338 info->mask &= ~new_mask;
2339 }
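
/* A worked example of the mask manipulation above, assuming the return
   value occupies four hard registers starting at info->regno: info->mask
   starts out as 0b1111, and a store that fully covers the two registers
   at info->regno + 1 and info->regno + 2 yields new_mask == 0b0110,
   leaving 0b1001, i.e. only the first and last registers are still
   treated as possibly-live parts of the return value.  */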
2340
2341 /* Return nonzero iff part of the return value is live during INSN, and
2342 it is likely spilled. This can happen when more than one insn is needed
2343 to copy the return value, e.g. when we consider combining into the
2344 second copy insn for a complex value. */
2345
2346 static int
2347 likely_spilled_retval_p (rtx_insn *insn)
2348 {
2349 rtx_insn *use = BB_END (this_basic_block);
2350 rtx reg;
2351 rtx_insn *p;
2352 unsigned regno, nregs;
2353 /* We assume here that no machine mode needs more than
2354 32 hard registers when the value overlaps with a register
2355 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2356 unsigned mask;
2357 struct likely_spilled_retval_info info;
2358
2359 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2360 return 0;
2361 reg = XEXP (PATTERN (use), 0);
2362 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2363 return 0;
2364 regno = REGNO (reg);
2365 nregs = REG_NREGS (reg);
2366 if (nregs == 1)
2367 return 0;
2368 mask = (2U << (nregs - 1)) - 1;
2369
2370 /* Disregard parts of the return value that are set later. */
2371 info.regno = regno;
2372 info.nregs = nregs;
2373 info.mask = mask;
2374 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2375 if (INSN_P (p))
2376 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2377 mask = info.mask;
2378
2379 /* Check if any of the (probably) live return value registers is
2380 likely spilled. */
2381 nregs--;
2382 do
2383 {
2384 if ((mask & 1 << nregs)
2385 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2386 return 1;
2387 } while (nregs--);
2388 return 0;
2389 }
2390
2391 /* Adjust INSN after we made a change to its destination.
2392
2393 Changing the destination can invalidate notes that say something about
2394 the results of the insn and a LOG_LINK pointing to the insn. */
2395
2396 static void
2397 adjust_for_new_dest (rtx_insn *insn)
2398 {
2399 /* For notes, be conservative and simply remove them. */
2400 remove_reg_equal_equiv_notes (insn);
2401
2402 /* The new insn will have a destination that was previously the destination
2403 of an insn just above it. Call distribute_links to make a LOG_LINK from
2404 the next use of that destination. */
2405
2406 rtx set = single_set (insn);
2407 gcc_assert (set);
2408
2409 rtx reg = SET_DEST (set);
2410
2411 while (GET_CODE (reg) == ZERO_EXTRACT
2412 || GET_CODE (reg) == STRICT_LOW_PART
2413 || GET_CODE (reg) == SUBREG)
2414 reg = XEXP (reg, 0);
2415 gcc_assert (REG_P (reg));
2416
2417 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2418
2419 df_insn_rescan (insn);
2420 }
2421
2422 /* Return TRUE if combine can reuse reg X in mode MODE.
2423 ADDED_SETS is nonzero if the original set is still required. */
2424 static bool
2425 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2426 {
2427 unsigned int regno;
2428
2429 if (!REG_P (x))
2430 return false;
2431
2432 regno = REGNO (x);
2433 /* Allow hard registers if the new mode is legal, and occupies no more
2434 registers than the old mode. */
2435 if (regno < FIRST_PSEUDO_REGISTER)
2436 return (HARD_REGNO_MODE_OK (regno, mode)
2437 && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);
2438
2439 /* Or a pseudo that is only used once. */
2440 return (regno < reg_n_sets_max
2441 && REG_N_SETS (regno) == 1
2442 && !added_sets
2443 && !REG_USERVAR_P (x));
2444 }
2445
2446
2447 /* Check whether X, the destination of a set, refers to part of
2448 the register specified by REG. */
2449
2450 static bool
2451 reg_subword_p (rtx x, rtx reg)
2452 {
2453 /* Check that reg is an integer mode register. */
2454 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2455 return false;
2456
2457 if (GET_CODE (x) == STRICT_LOW_PART
2458 || GET_CODE (x) == ZERO_EXTRACT)
2459 x = XEXP (x, 0);
2460
2461 return GET_CODE (x) == SUBREG
2462 && SUBREG_REG (x) == reg
2463 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2464 }
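
/* For example (illustrative RTL only), with REG == (reg:SI 100) a
   destination of (strict_low_part (subreg:HI (reg:SI 100) 0)) satisfies
   this predicate, whereas (reg:SI 100) itself, or a SUBREG of some other
   register, does not.  */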
2465
2466 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2467 Note that the INSN should be deleted *after* removing dead edges, so
2468 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2469 but not for a (set (pc) (label_ref FOO)). */
2470
2471 static void
2472 update_cfg_for_uncondjump (rtx_insn *insn)
2473 {
2474 basic_block bb = BLOCK_FOR_INSN (insn);
2475 gcc_assert (BB_END (bb) == insn);
2476
2477 purge_dead_edges (bb);
2478
2479 delete_insn (insn);
2480 if (EDGE_COUNT (bb->succs) == 1)
2481 {
2482 rtx_insn *insn;
2483
2484 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2485
2486 /* Remove barriers from the footer if there are any. */
2487 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2488 if (BARRIER_P (insn))
2489 {
2490 if (PREV_INSN (insn))
2491 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2492 else
2493 BB_FOOTER (bb) = NEXT_INSN (insn);
2494 if (NEXT_INSN (insn))
2495 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2496 }
2497 else if (LABEL_P (insn))
2498 break;
2499 }
2500 }
2501
2502 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2503 by an arbitrary number of CLOBBERs. */
2504 static bool
2505 is_parallel_of_n_reg_sets (rtx pat, int n)
2506 {
2507 if (GET_CODE (pat) != PARALLEL)
2508 return false;
2509
2510 int len = XVECLEN (pat, 0);
2511 if (len < n)
2512 return false;
2513
2514 int i;
2515 for (i = 0; i < n; i++)
2516 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2517 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2518 return false;
2519 for ( ; i < len; i++)
2520 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2521 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2522 return false;
2523
2524 return true;
2525 }
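
/* E.g. with N == 2, a hypothetical pattern of the form
     (parallel [(set (reg:SI 100) ...)
		(set (reg:SI 101) ...)
		(clobber (reg:SI 102))])
   qualifies, while a PARALLEL whose second element sets a MEM, or one
   with fewer than two SETs, does not.  */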
2526
2527 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2528 CLOBBERs), can be split into individual SETs in that order, without
2529 changing semantics. */
2530 static bool
2531 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2532 {
2533 if (!insn_nothrow_p (insn))
2534 return false;
2535
2536 rtx pat = PATTERN (insn);
2537
2538 int i, j;
2539 for (i = 0; i < n; i++)
2540 {
2541 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2542 return false;
2543
2544 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2545
2546 for (j = i + 1; j < n; j++)
2547 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2548 return false;
2549 }
2550
2551 return true;
2552 }
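
/* As an illustration (hypothetical pattern),
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:SI 102) (reg:SI 100))])
   cannot be split in order: within the PARALLEL both SETs read the old
   value of (reg:SI 100), but after splitting the second SET would read
   the value just stored by the first, changing semantics.  */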
2553
2554 /* Try to combine the insns I0, I1 and I2 into I3.
2555 Here I0, I1 and I2 appear earlier than I3.
2556 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2557 I3.
2558
2559 If we are combining more than two insns and the resulting insn is not
2560 recognized, try splitting it into two insns. If that happens, I2 and I3
2561 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2562 Otherwise, I0, I1 and I2 are pseudo-deleted.
2563
2564 Return 0 if the combination does not work. Then nothing is changed.
2565 If we did the combination, return the insn at which combine should
2566 resume scanning.
2567
2568 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2569 new direct jump instruction.
2570
2571 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2572 been I3 passed to an earlier try_combine within the same basic
2573 block. */
2574
2575 static rtx_insn *
2576 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2577 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2578 {
2579 /* New patterns for I3 and I2, respectively. */
2580 rtx newpat, newi2pat = 0;
2581 rtvec newpat_vec_with_clobbers = 0;
2582 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2583 /* Nonzero if the SET in I0, I1 or I2 respectively needs to be preserved
2584 in the merged insn because its destination is not dead. */
2585 int added_sets_0, added_sets_1, added_sets_2;
2586 /* Total number of SETs to put into I3. */
2587 int total_sets;
2588 /* Nonzero if I2's or I1's body now appears in I3. */
2589 int i2_is_used = 0, i1_is_used = 0;
2590 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2591 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2592 /* Contains the destination of I3 if it is used in I3's source, which means
2593 that the old life of I3 is being killed. If that usage is placed into
2594 I2 and not in I3, a REG_DEAD note must be made. */
2595 rtx i3dest_killed = 0;
2596 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2597 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2598 /* Copy of SET_SRC of I1 and I0, if needed. */
2599 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2600 /* Set if I2DEST was reused as a scratch register. */
2601 bool i2scratch = false;
2602 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2603 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2604 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2605 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2606 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2607 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2608 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2609 /* Notes that must be added to REG_NOTES in I3 and I2. */
2610 rtx new_i3_notes, new_i2_notes;
2611 /* Nonzero if we substituted I3 into I2 instead of the normal case. */
2612 int i3_subst_into_i2 = 0;
2613 /* Nonzero if the source of I0, I1, I2 or I3 is a MULT operation. */
2614 int have_mult = 0;
2615 int swap_i2i3 = 0;
2616 int changed_i3_dest = 0;
2617
2618 int maxreg;
2619 rtx_insn *temp_insn;
2620 rtx temp_expr;
2621 struct insn_link *link;
2622 rtx other_pat = 0;
2623 rtx new_other_notes;
2624 int i;
2625
2626 /* Immediately return if any of I0, I1, I2 are the same insn (I3 can
2627 never be). */
2628 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2629 return 0;
2630
2631 /* Only try four-insn combinations when there's high likelihood of
2632 success. Look for simple insns, such as loads of constants or
2633 binary operations involving a constant. */
2634 if (i0)
2635 {
2636 int i;
2637 int ngood = 0;
2638 int nshift = 0;
2639 rtx set0, set3;
2640
2641 if (!flag_expensive_optimizations)
2642 return 0;
2643
2644 for (i = 0; i < 4; i++)
2645 {
2646 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2647 rtx set = single_set (insn);
2648 rtx src;
2649 if (!set)
2650 continue;
2651 src = SET_SRC (set);
2652 if (CONSTANT_P (src))
2653 {
2654 ngood += 2;
2655 break;
2656 }
2657 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2658 ngood++;
2659 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2660 || GET_CODE (src) == LSHIFTRT)
2661 nshift++;
2662 }
2663
2664 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2665 are likely manipulating its value. Ideally we'll be able to combine
2666 all four insns into a bitfield insertion of some kind.
2667
2668 Note the source in I0 might be inside a sign/zero extension and the
2669 memory modes in I0 and I3 might be different. So extract the address
2670 from the destination of I3 and search for it in the source of I0.
2671
2672 In the event that there's a match but the source/dest do not actually
2673 refer to the same memory, the worst that happens is we try some
2674 combinations that we wouldn't have otherwise. */
2675 if ((set0 = single_set (i0))
2676 /* Ensure the source of SET0 is a MEM, possibly buried inside
2677 an extension. */
2678 && (GET_CODE (SET_SRC (set0)) == MEM
2679 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2680 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2681 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2682 && (set3 = single_set (i3))
2683 /* Ensure the destination of SET3 is a MEM. */
2684 && GET_CODE (SET_DEST (set3)) == MEM
2685 /* Would it be better to extract the base address for the MEM
2686 in SET3 and look for that? I don't have cases where it matters
2687 but I could envision such cases. */
2688 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2689 ngood += 2;
2690
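      /* E.g. a single constant load among the four insns (counted as 2
	 above), or two shift insns, is enough to make the attempt
	 worthwhile.  */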
2691 if (ngood < 2 && nshift < 2)
2692 return 0;
2693 }
2694
2695 /* Exit early if one of the insns involved can't be used for
2696 combinations. */
2697 if (CALL_P (i2)
2698 || (i1 && CALL_P (i1))
2699 || (i0 && CALL_P (i0))
2700 || cant_combine_insn_p (i3)
2701 || cant_combine_insn_p (i2)
2702 || (i1 && cant_combine_insn_p (i1))
2703 || (i0 && cant_combine_insn_p (i0))
2704 || likely_spilled_retval_p (i3))
2705 return 0;
2706
2707 combine_attempts++;
2708 undobuf.other_insn = 0;
2709
2710 /* Reset the hard register usage information. */
2711 CLEAR_HARD_REG_SET (newpat_used_regs);
2712
2713 if (dump_file && (dump_flags & TDF_DETAILS))
2714 {
2715 if (i0)
2716 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2717 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2718 else if (i1)
2719 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2720 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2721 else
2722 fprintf (dump_file, "\nTrying %d -> %d:\n",
2723 INSN_UID (i2), INSN_UID (i3));
2724 }
2725
2726 /* If multiple insns feed into one of I2 or I3, they can be in any
2727 order. To simplify the code below, reorder them in sequence. */
2728 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2729 std::swap (i0, i2);
2730 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2731 std::swap (i0, i1);
2732 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2733 std::swap (i1, i2);
2734
2735 added_links_insn = 0;
2736
2737 /* First check for one important special case that the code below will
2738 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2739 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2740 we may be able to replace that destination with the destination of I3.
2741 This occurs in the common code where we compute both a quotient and
2742 remainder into a structure, in which case we want to do the computation
2743 directly into the structure to avoid register-register copies.
2744
2745 Note that this case handles both multiple sets in I2 and cases
2746 where I2 has a number of CLOBBERs inside the PARALLEL.
2747
2748 We make very conservative checks below and only try to handle the
2749 most common cases of this. For example, we only handle the case
2750 where I2 and I3 are adjacent to avoid making difficult register
2751 usage tests. */
2752
2753 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2754 && REG_P (SET_SRC (PATTERN (i3)))
2755 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2756 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2757 && GET_CODE (PATTERN (i2)) == PARALLEL
2758 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2759 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2760 below would need to check what is inside (and reg_overlap_mentioned_p
2761 doesn't support those codes anyway). Don't allow those destinations;
2762 the resulting insn isn't likely to be recognized anyway. */
2763 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2764 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2765 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2766 SET_DEST (PATTERN (i3)))
2767 && next_active_insn (i2) == i3)
2768 {
2769 rtx p2 = PATTERN (i2);
2770
2771 /* Make sure that the destination of I3,
2772 which we are going to substitute into one output of I2,
2773 is not used within another output of I2. We must avoid making this:
2774 (parallel [(set (mem (reg 69)) ...)
2775 (set (reg 69) ...)])
2776 which is not well-defined as to order of actions.
2777 (Besides, reload can't handle output reloads for this.)
2778
2779 The problem can also happen if the dest of I3 is a memory ref,
2780 if another dest in I2 is an indirect memory ref. */
2781 for (i = 0; i < XVECLEN (p2, 0); i++)
2782 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2783 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2784 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2785 SET_DEST (XVECEXP (p2, 0, i))))
2786 break;
2787
2788 /* Make sure this PARALLEL is not an asm. We do not allow combining
2789 that usually (see can_combine_p), so do not here either. */
2790 for (i = 0; i < XVECLEN (p2, 0); i++)
2791 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2792 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2793 break;
2794
2795 if (i == XVECLEN (p2, 0))
2796 for (i = 0; i < XVECLEN (p2, 0); i++)
2797 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2798 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2799 {
2800 combine_merges++;
2801
2802 subst_insn = i3;
2803 subst_low_luid = DF_INSN_LUID (i2);
2804
2805 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2806 i2src = SET_SRC (XVECEXP (p2, 0, i));
2807 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2808 i2dest_killed = dead_or_set_p (i2, i2dest);
2809
2810 /* Replace the dest in I2 with our dest and make the resulting
2811 insn the new pattern for I3. Then skip to where we validate
2812 the pattern. Everything was set up above. */
2813 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2814 newpat = p2;
2815 i3_subst_into_i2 = 1;
2816 goto validate_replacement;
2817 }
2818 }
2819
2820 /* If I2 is setting a pseudo to a constant and I3 is setting some
2821 sub-part of it to another constant, merge them by making a new
2822 constant. */
2823 if (i1 == 0
2824 && (temp_expr = single_set (i2)) != 0
2825 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2826 && GET_CODE (PATTERN (i3)) == SET
2827 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2828 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2829 {
2830 rtx dest = SET_DEST (PATTERN (i3));
2831 int offset = -1;
2832 int width = 0;
2833
2834 if (GET_CODE (dest) == ZERO_EXTRACT)
2835 {
2836 if (CONST_INT_P (XEXP (dest, 1))
2837 && CONST_INT_P (XEXP (dest, 2)))
2838 {
2839 width = INTVAL (XEXP (dest, 1));
2840 offset = INTVAL (XEXP (dest, 2));
2841 dest = XEXP (dest, 0);
2842 if (BITS_BIG_ENDIAN)
2843 offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2844 }
2845 }
2846 else
2847 {
2848 if (GET_CODE (dest) == STRICT_LOW_PART)
2849 dest = XEXP (dest, 0);
2850 width = GET_MODE_PRECISION (GET_MODE (dest));
2851 offset = 0;
2852 }
2853
2854 if (offset >= 0)
2855 {
2856 /* If this is the low part, we're done. */
2857 if (subreg_lowpart_p (dest))
2858 ;
2859 /* Handle the case where inner is twice the size of outer. */
2860 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
2861 == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2862 offset += GET_MODE_PRECISION (GET_MODE (dest));
2863 /* Otherwise give up for now. */
2864 else
2865 offset = -1;
2866 }
2867
2868 if (offset >= 0)
2869 {
2870 rtx inner = SET_SRC (PATTERN (i3));
2871 rtx outer = SET_SRC (temp_expr);
2872
2873 wide_int o
2874 = wi::insert (rtx_mode_t (outer, GET_MODE (SET_DEST (temp_expr))),
2875 rtx_mode_t (inner, GET_MODE (dest)),
2876 offset, width);
2877
2878 combine_merges++;
2879 subst_insn = i3;
2880 subst_low_luid = DF_INSN_LUID (i2);
2881 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2882 i2dest = SET_DEST (temp_expr);
2883 i2dest_killed = dead_or_set_p (i2, i2dest);
2884
2885 /* Replace the source in I2 with the new constant and make the
2886 resulting insn the new pattern for I3. Then skip to where we
2887 validate the pattern. Everything was set up above. */
2888 SUBST (SET_SRC (temp_expr),
2889 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));
2890
2891 newpat = PATTERN (i2);
2892
2893 /* The dest of I3 has been replaced with the dest of I2. */
2894 changed_i3_dest = 1;
2895 goto validate_replacement;
2896 }
2897 }
2898
2899 /* If we have no I1 and I2 looks like:
2900 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2901 (set Y OP)])
2902 make up a dummy I1 that is
2903 (set Y OP)
2904 and change I2 to be
2905 (set (reg:CC X) (compare:CC Y (const_int 0)))
2906
2907 (We can ignore any trailing CLOBBERs.)
2908
2909 This undoes a previous combination and allows us to match a branch-and-
2910 decrement insn. */
2911
2912 if (!HAVE_cc0 && i1 == 0
2913 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2914 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2915 == MODE_CC)
2916 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2917 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2918 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2919 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2920 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2921 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2922 {
2923 /* We make I1 with the same INSN_UID as I2. This gives it
2924 the same DF_INSN_LUID for value tracking. Our fake I1 will
2925 never appear in the insn stream so giving it the same INSN_UID
2926 as I2 will not cause a problem. */
2927
2928 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2929 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2930 -1, NULL_RTX);
2931 INSN_UID (i1) = INSN_UID (i2);
2932
2933 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2934 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2935 SET_DEST (PATTERN (i1)));
2936 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2937 SUBST_LINK (LOG_LINKS (i2),
2938 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2939 }
2940
2941 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2942 make those two SETs separate I1 and I2 insns, and make an I0 that is
2943 the original I1. */
2944 if (!HAVE_cc0 && i0 == 0
2945 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2946 && can_split_parallel_of_n_reg_sets (i2, 2)
2947 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2948 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2949 {
2950 /* If there is no I1, there is no I0 either. */
2951 i0 = i1;
2952
2953 /* We make I1 with the same INSN_UID as I2. This gives it
2954 the same DF_INSN_LUID for value tracking. Our fake I1 will
2955 never appear in the insn stream so giving it the same INSN_UID
2956 as I2 will not cause a problem. */
2957
2958 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2959 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2960 -1, NULL_RTX);
2961 INSN_UID (i1) = INSN_UID (i2);
2962
2963 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2964 }
2965
2966 /* Verify that I2 and I1 are valid for combining. */
2967 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2968 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
2969 &i1dest, &i1src))
2970 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
2971 &i0dest, &i0src)))
2972 {
2973 undo_all ();
2974 return 0;
2975 }
2976
2977 /* Record whether I2DEST is used in I2SRC and similarly for the other
2978 cases. Knowing this will help in register status updating below. */
2979 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2980 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2981 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2982 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2983 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2984 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2985 i2dest_killed = dead_or_set_p (i2, i2dest);
2986 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2987 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2988
2989 /* For the earlier insns, determine which of the subsequent ones they
2990 feed. */
2991 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2992 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2993 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2994 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2995 && reg_overlap_mentioned_p (i0dest, i2src))));
2996
2997 /* Ensure that I3's pattern can be the destination of combines. */
2998 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2999 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3000 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3001 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3002 &i3dest_killed))
3003 {
3004 undo_all ();
3005 return 0;
3006 }
3007
3008 /* See if any of the insns is a MULT operation. Unless one is, we will
3009 reject a combination that is, since it must be slower. Be conservative
3010 here. */
3011 if (GET_CODE (i2src) == MULT
3012 || (i1 != 0 && GET_CODE (i1src) == MULT)
3013 || (i0 != 0 && GET_CODE (i0src) == MULT)
3014 || (GET_CODE (PATTERN (i3)) == SET
3015 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3016 have_mult = 1;
3017
3018 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3019 We used to do this EXCEPT in one case: I3 has a post-inc in an
3020 output operand. However, that exception can give rise to insns like
3021 mov r3,(r3)+
3022 which is a famous insn on the PDP-11 where the value of r3 used as the
3023 source was model-dependent. Avoid this sort of thing. */
3024
3025 #if 0
3026 if (!(GET_CODE (PATTERN (i3)) == SET
3027 && REG_P (SET_SRC (PATTERN (i3)))
3028 && MEM_P (SET_DEST (PATTERN (i3)))
3029 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3030 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3031 /* It's not the exception. */
3032 #endif
3033 if (AUTO_INC_DEC)
3034 {
3035 rtx link;
3036 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3037 if (REG_NOTE_KIND (link) == REG_INC
3038 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3039 || (i1 != 0
3040 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3041 {
3042 undo_all ();
3043 return 0;
3044 }
3045 }
3046
3047 /* See if the SETs in I1 or I2 need to be kept around in the merged
3048 instruction: whenever the value set there is still needed past I3.
3049 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3050
3051 For the SET in I1, we have two cases: if I1 and I2 independently feed
3052 into I3, the set in I1 needs to be kept around unless I1DEST dies
3053 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3054 in I1 needs to be kept around unless I1DEST dies or is set in either
3055 I2 or I3. The same considerations apply to I0. */
3056
3057 added_sets_2 = !dead_or_set_p (i3, i2dest);
3058
3059 if (i1)
3060 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3061 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3062 else
3063 added_sets_1 = 0;
3064
3065 if (i0)
3066 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3067 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3068 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3069 && dead_or_set_p (i2, i0dest)));
3070 else
3071 added_sets_0 = 0;
3072
3073 /* We are about to copy insns for the case where they need to be kept
3074 around. Check that they can be copied in the merged instruction. */
3075
3076 if (targetm.cannot_copy_insn_p
3077 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3078 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3079 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3080 {
3081 undo_all ();
3082 return 0;
3083 }
3084
3085 /* If the set in I2 needs to be kept around, we must make a copy of
3086 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3087 PATTERN (I2), we are only substituting for the original I1DEST, not into
3088 an already-substituted copy. This also prevents making self-referential
3089 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3090 I2DEST. */
3091
3092 if (added_sets_2)
3093 {
3094 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3095 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3096 else
3097 i2pat = copy_rtx (PATTERN (i2));
3098 }
3099
3100 if (added_sets_1)
3101 {
3102 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3103 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3104 else
3105 i1pat = copy_rtx (PATTERN (i1));
3106 }
3107
3108 if (added_sets_0)
3109 {
3110 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3111 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3112 else
3113 i0pat = copy_rtx (PATTERN (i0));
3114 }
3115
3116 combine_merges++;
3117
3118 /* Substitute in the latest insn for the regs set by the earlier ones. */
3119
3120 maxreg = max_reg_num ();
3121
3122 subst_insn = i3;
3123
3124 /* Many machines that don't use CC0 have insns that can both perform an
3125 arithmetic operation and set the condition code. These operations will
3126 be represented as a PARALLEL with the first element of the vector
3127 being a COMPARE of an arithmetic operation with the constant zero.
3128 The second element of the vector will set some pseudo to the result
3129 of the same arithmetic operation. If we simplify the COMPARE, we won't
3130 match such a pattern and so will generate an extra insn. Here we test
3131 for this case, where both the comparison and the operation result are
3132 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3133 I2SRC. Later we will make the PARALLEL that contains I2. */
3134
3135 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3136 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3137 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3138 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3139 {
3140 rtx newpat_dest;
3141 rtx *cc_use_loc = NULL;
3142 rtx_insn *cc_use_insn = NULL;
3143 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3144 machine_mode compare_mode, orig_compare_mode;
3145 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3146
3147 newpat = PATTERN (i3);
3148 newpat_dest = SET_DEST (newpat);
3149 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3150
3151 if (undobuf.other_insn == 0
3152 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3153 &cc_use_insn)))
3154 {
3155 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3156 compare_code = simplify_compare_const (compare_code,
3157 GET_MODE (i2dest), op0, &op1);
3158 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3159 }
3160
3161 /* Do the rest only if op1 is const0_rtx, which may be the
3162 result of simplification. */
3163 if (op1 == const0_rtx)
3164 {
3165 /* If a single use of the CC is found, prepare to modify it
3166 when SELECT_CC_MODE returns a new CC-class mode, or when
3167 the above simplify_compare_const() returned a new comparison
3168 operator. undobuf.other_insn is assigned the CC use insn
3169 when modifying it. */
3170 if (cc_use_loc)
3171 {
3172 #ifdef SELECT_CC_MODE
3173 machine_mode new_mode
3174 = SELECT_CC_MODE (compare_code, op0, op1);
3175 if (new_mode != orig_compare_mode
3176 && can_change_dest_mode (SET_DEST (newpat),
3177 added_sets_2, new_mode))
3178 {
3179 unsigned int regno = REGNO (newpat_dest);
3180 compare_mode = new_mode;
3181 if (regno < FIRST_PSEUDO_REGISTER)
3182 newpat_dest = gen_rtx_REG (compare_mode, regno);
3183 else
3184 {
3185 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3186 newpat_dest = regno_reg_rtx[regno];
3187 }
3188 }
3189 #endif
3190 /* Cases for modifying the CC-using comparison. */
3191 if (compare_code != orig_compare_code
3192 /* ??? Do we need to verify the zero rtx? */
3193 && XEXP (*cc_use_loc, 1) == const0_rtx)
3194 {
3195 /* Replace cc_use_loc with entire new RTX. */
3196 SUBST (*cc_use_loc,
3197 gen_rtx_fmt_ee (compare_code, compare_mode,
3198 newpat_dest, const0_rtx));
3199 undobuf.other_insn = cc_use_insn;
3200 }
3201 else if (compare_mode != orig_compare_mode)
3202 {
3203 /* Just replace the CC reg with a new mode. */
3204 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3205 undobuf.other_insn = cc_use_insn;
3206 }
3207 }
3208
3209 /* Now we modify the current newpat:
3210 First, SET_DEST(newpat) is updated if the CC mode has been
3211 altered. For targets without SELECT_CC_MODE, this should be
3212 optimized away. */
3213 if (compare_mode != orig_compare_mode)
3214 SUBST (SET_DEST (newpat), newpat_dest);
3215 /* This is always done to propagate i2src into newpat. */
3216 SUBST (SET_SRC (newpat),
3217 gen_rtx_COMPARE (compare_mode, op0, op1));
3218 /* Create new version of i2pat if needed; the below PARALLEL
3219 creation needs this to work correctly. */
3220 if (! rtx_equal_p (i2src, op0))
3221 i2pat = gen_rtx_SET (i2dest, op0);
3222 i2_is_used = 1;
3223 }
3224 }
3225
3226 if (i2_is_used == 0)
3227 {
3228 /* It is possible that the source of I2 or I1 may be performing
3229 an unneeded operation, such as a ZERO_EXTEND of something
3230 that is known to have the high part zero. Handle that case
3231 by letting subst look at the inner insns.
3232
3233 Another way to do this would be to have a function that tries
3234 to simplify a single insn instead of merging two or more
3235 insns. We don't do this because of the potential of infinite
3236 loops and because of the potential extra memory required.
3237 However, doing it the way we are is a bit of a kludge and
3238 doesn't catch all cases.
3239
3240 But only do this if -fexpensive-optimizations since it slows
3241 things down and doesn't usually win.
3242
3243 This is not done in the COMPARE case above because the
3244 unmodified I2PAT is used in the PARALLEL and so a pattern
3245 with a modified I2SRC would not match. */
3246
3247 if (flag_expensive_optimizations)
3248 {
3249 /* Pass pc_rtx so no substitutions are done, just
3250 simplifications. */
3251 if (i1)
3252 {
3253 subst_low_luid = DF_INSN_LUID (i1);
3254 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3255 }
3256
3257 subst_low_luid = DF_INSN_LUID (i2);
3258 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3259 }
3260
3261 n_occurrences = 0; /* `subst' counts here */
3262 subst_low_luid = DF_INSN_LUID (i2);
3263
3264 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3265 copy of I2SRC each time we substitute it, in order to avoid creating
3266 self-referential RTL when we will be substituting I1SRC for I1DEST
3267 later. Likewise if I0 feeds into I2, either directly or indirectly
3268 through I1, and I0DEST is in I0SRC. */
3269 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3270 (i1_feeds_i2_n && i1dest_in_i1src)
3271 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3272 && i0dest_in_i0src));
3273 substed_i2 = 1;
3274
3275 /* Record whether I2's body now appears within I3's body. */
3276 i2_is_used = n_occurrences;
3277 }
3278
3279 /* If we already got a failure, don't try to do more. Otherwise, try to
3280 substitute I1 if we have it. */
3281
3282 if (i1 && GET_CODE (newpat) != CLOBBER)
3283 {
3284 /* Check that an autoincrement side-effect on I1 has not been lost.
3285 This happens if I1DEST is mentioned in I2 and dies there, and
3286 has disappeared from the new pattern. */
3287 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3288 && i1_feeds_i2_n
3289 && dead_or_set_p (i2, i1dest)
3290 && !reg_overlap_mentioned_p (i1dest, newpat))
3291 /* Before we can do this substitution, we must redo the test done
3292 above (see detailed comments there) that ensures I1DEST isn't
3293 mentioned in any SETs in NEWPAT that are field assignments. */
3294 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3295 0, 0, 0))
3296 {
3297 undo_all ();
3298 return 0;
3299 }
3300
3301 n_occurrences = 0;
3302 subst_low_luid = DF_INSN_LUID (i1);
3303
3304 /* If the following substitution will modify I1SRC, make a copy of it
3305 for the case where it is substituted for I1DEST in I2PAT later. */
3306 if (added_sets_2 && i1_feeds_i2_n)
3307 i1src_copy = copy_rtx (i1src);
3308
3309 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3310 copy of I1SRC each time we substitute it, in order to avoid creating
3311 self-referential RTL when we will be substituting I0SRC for I0DEST
3312 later. */
3313 newpat = subst (newpat, i1dest, i1src, 0, 0,
3314 i0_feeds_i1_n && i0dest_in_i0src);
3315 substed_i1 = 1;
3316
3317 /* Record whether I1's body now appears within I3's body. */
3318 i1_is_used = n_occurrences;
3319 }
3320
3321 /* Likewise for I0 if we have it. */
3322
3323 if (i0 && GET_CODE (newpat) != CLOBBER)
3324 {
3325 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3326 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3327 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3328 && !reg_overlap_mentioned_p (i0dest, newpat))
3329 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3330 0, 0, 0))
3331 {
3332 undo_all ();
3333 return 0;
3334 }
3335
3336 /* If the following substitution will modify I0SRC, make a copy of it
3337 for the case where it is substituted for I0DEST in I1PAT later. */
3338 if (added_sets_1 && i0_feeds_i1_n)
3339 i0src_copy = copy_rtx (i0src);
3340 /* And a copy for I0DEST in I2PAT substitution. */
3341 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3342 || (i0_feeds_i2_n)))
3343 i0src_copy2 = copy_rtx (i0src);
3344
3345 n_occurrences = 0;
3346 subst_low_luid = DF_INSN_LUID (i0);
3347 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3348 substed_i0 = 1;
3349 }
3350
3351 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3352 to count all the ways that I2SRC and I1SRC can be used. */
3353 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3354 && i2_is_used + added_sets_2 > 1)
3355 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3356 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3357 > 1))
3358 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3359 && (n_occurrences + added_sets_0
3360 + (added_sets_1 && i0_feeds_i1_n)
3361 + (added_sets_2 && i0_feeds_i2_n)
3362 > 1))
3363 /* Fail if we tried to make a new register. */
3364 || max_reg_num () != maxreg
3365 /* Fail if we couldn't do something and have a CLOBBER. */
3366 || GET_CODE (newpat) == CLOBBER
3367 /* Fail if this new pattern is a MULT and we didn't have one before
3368 at the outer level. */
3369 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3370 && ! have_mult))
3371 {
3372 undo_all ();
3373 return 0;
3374 }
3375
3376 /* If the actions of the earlier insns must be kept
3377 in addition to substituting them into the latest one,
3378 we must make a new PARALLEL for the latest insn
3379 to hold the additional SETs. */
3380
3381 if (added_sets_0 || added_sets_1 || added_sets_2)
3382 {
3383 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3384 combine_extras++;
3385
3386 if (GET_CODE (newpat) == PARALLEL)
3387 {
3388 rtvec old = XVEC (newpat, 0);
3389 total_sets = XVECLEN (newpat, 0) + extra_sets;
3390 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3391 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3392 sizeof (old->elem[0]) * old->num_elem);
3393 }
3394 else
3395 {
3396 rtx old = newpat;
3397 total_sets = 1 + extra_sets;
3398 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3399 XVECEXP (newpat, 0, 0) = old;
3400 }
3401
3402 if (added_sets_0)
3403 XVECEXP (newpat, 0, --total_sets) = i0pat;
3404
3405 if (added_sets_1)
3406 {
3407 rtx t = i1pat;
3408 if (i0_feeds_i1_n)
3409 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3410
3411 XVECEXP (newpat, 0, --total_sets) = t;
3412 }
3413 if (added_sets_2)
3414 {
3415 rtx t = i2pat;
3416 if (i1_feeds_i2_n)
3417 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3418 i0_feeds_i1_n && i0dest_in_i0src);
3419 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3420 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3421
3422 XVECEXP (newpat, 0, --total_sets) = t;
3423 }
3424 }
3425
3426 validate_replacement:
3427
3428 /* Note which hard regs this insn has as inputs. */
3429 mark_used_regs_combine (newpat);
3430
3431 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3432 consider splitting this pattern, we might need these clobbers. */
3433 if (i1 && GET_CODE (newpat) == PARALLEL
3434 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3435 {
3436 int len = XVECLEN (newpat, 0);
3437
3438 newpat_vec_with_clobbers = rtvec_alloc (len);
3439 for (i = 0; i < len; i++)
3440 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3441 }
3442
3443 /* We have recognized nothing yet. */
3444 insn_code_number = -1;
3445
3446 /* See if this is a PARALLEL of two SETs where one SET's destination is
3447 a register that is unused and this isn't marked as an instruction that
3448 might trap in an EH region. In that case, we just need the other SET.
3449 We prefer this over the PARALLEL.
3450
3451 This can occur when simplifying a divmod insn. We *must* test for this
3452 case here because the code below that splits two independent SETs doesn't
3453 handle this case correctly when it updates the register status.
3454
3455 It's pointless doing this if we originally had two sets, one from
3456 i3, and one from i2. Combining then splitting the parallel results
3457 in the original i2 again plus an invalid insn (which we delete).
3458 The net effect is only to move instructions around, which makes
3459 debug info less accurate. */
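  /* Purely illustrative: a divmod expansion can leave something like
       (parallel [(set (reg:SI 100) (div:SI (reg:SI 101) (reg:SI 102)))
		  (set (reg:SI 103) (mod:SI (reg:SI 101) (reg:SI 102)))])
     and if I3 carries a REG_UNUSED note for (reg:SI 103), the code below
     keeps only the first SET.  */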
3460
3461 if (!(added_sets_2 && i1 == 0)
3462 && is_parallel_of_n_reg_sets (newpat, 2)
3463 && asm_noperands (newpat) < 0)
3464 {
3465 rtx set0 = XVECEXP (newpat, 0, 0);
3466 rtx set1 = XVECEXP (newpat, 0, 1);
3467 rtx oldpat = newpat;
3468
3469 if (((REG_P (SET_DEST (set1))
3470 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3471 || (GET_CODE (SET_DEST (set1)) == SUBREG
3472 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3473 && insn_nothrow_p (i3)
3474 && !side_effects_p (SET_SRC (set1)))
3475 {
3476 newpat = set0;
3477 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3478 }
3479
3480 else if (((REG_P (SET_DEST (set0))
3481 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3482 || (GET_CODE (SET_DEST (set0)) == SUBREG
3483 && find_reg_note (i3, REG_UNUSED,
3484 SUBREG_REG (SET_DEST (set0)))))
3485 && insn_nothrow_p (i3)
3486 && !side_effects_p (SET_SRC (set0)))
3487 {
3488 newpat = set1;
3489 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3490
3491 if (insn_code_number >= 0)
3492 changed_i3_dest = 1;
3493 }
3494
3495 if (insn_code_number < 0)
3496 newpat = oldpat;
3497 }
3498
3499 /* Is the result of combination a valid instruction? */
3500 if (insn_code_number < 0)
3501 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3502
3503 /* If we were combining three insns and the result is a simple SET
3504 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3505 insns. There are two ways to do this. It can be split using a
3506 machine-specific method (like when you have an addition of a large
3507 constant) or by combine in the function find_split_point. */
3508
3509 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3510 && asm_noperands (newpat) < 0)
3511 {
3512 rtx parallel, *split;
3513 rtx_insn *m_split_insn;
3514
3515 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3516 use I2DEST as a scratch register will help. In the latter case,
3517 convert I2DEST to the mode of the source of NEWPAT if we can. */
3518
3519 m_split_insn = combine_split_insns (newpat, i3);
3520
3521 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3522 inputs of NEWPAT. */
3523
3524 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3525 possible to try that as a scratch reg. This would require adding
3526 more code to make it work though. */
3527
3528 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3529 {
3530 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3531
3532 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3533 (temporarily, until we are committed to this instruction
3534 combination) does not work: for example, any call to nonzero_bits
3535 on the register (from a splitter in the MD file, for example)
3536 will get the old information, which is invalid.
3537
3538 Since nowadays we can create registers during combine just fine,
3539 we should just create a new one here, not reuse i2dest. */
3540
3541 /* First try to split using the original register as a
3542 scratch register. */
3543 parallel = gen_rtx_PARALLEL (VOIDmode,
3544 gen_rtvec (2, newpat,
3545 gen_rtx_CLOBBER (VOIDmode,
3546 i2dest)));
3547 m_split_insn = combine_split_insns (parallel, i3);
3548
3549 /* If that didn't work, try changing the mode of I2DEST if
3550 we can. */
3551 if (m_split_insn == 0
3552 && new_mode != GET_MODE (i2dest)
3553 && new_mode != VOIDmode
3554 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3555 {
3556 machine_mode old_mode = GET_MODE (i2dest);
3557 rtx ni2dest;
3558
3559 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3560 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3561 else
3562 {
3563 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3564 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3565 }
3566
3567 parallel = (gen_rtx_PARALLEL
3568 (VOIDmode,
3569 gen_rtvec (2, newpat,
3570 gen_rtx_CLOBBER (VOIDmode,
3571 ni2dest))));
3572 m_split_insn = combine_split_insns (parallel, i3);
3573
3574 if (m_split_insn == 0
3575 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3576 {
3577 struct undo *buf;
3578
3579 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3580 buf = undobuf.undos;
3581 undobuf.undos = buf->next;
3582 buf->next = undobuf.frees;
3583 undobuf.frees = buf;
3584 }
3585 }
3586
3587 i2scratch = m_split_insn != 0;
3588 }
3589
3590 /* If recog_for_combine has discarded clobbers, try to use them
3591 again for the split. */
3592 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3593 {
3594 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3595 m_split_insn = combine_split_insns (parallel, i3);
3596 }
3597
3598 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3599 {
3600 rtx m_split_pat = PATTERN (m_split_insn);
3601 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3602 if (insn_code_number >= 0)
3603 newpat = m_split_pat;
3604 }
3605 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3606 && (next_nonnote_nondebug_insn (i2) == i3
3607 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3608 {
3609 rtx i2set, i3set;
3610 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3611 newi2pat = PATTERN (m_split_insn);
3612
3613 i3set = single_set (NEXT_INSN (m_split_insn));
3614 i2set = single_set (m_split_insn);
3615
3616 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3617
3618 /* If I2 or I3 has multiple SETs, we won't know how to track
3619 register status, so don't use these insns. If I2's destination
3620 is used between I2 and I3, we also can't use these insns. */
3621
3622 if (i2_code_number >= 0 && i2set && i3set
3623 && (next_nonnote_nondebug_insn (i2) == i3
3624 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3625 insn_code_number = recog_for_combine (&newi3pat, i3,
3626 &new_i3_notes);
3627 if (insn_code_number >= 0)
3628 newpat = newi3pat;
3629
3630 /* It is possible that both insns now set the destination of I3.
3631 If so, we must show an extra use of it. */
3632
3633 if (insn_code_number >= 0)
3634 {
3635 rtx new_i3_dest = SET_DEST (i3set);
3636 rtx new_i2_dest = SET_DEST (i2set);
3637
3638 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3639 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3640 || GET_CODE (new_i3_dest) == SUBREG)
3641 new_i3_dest = XEXP (new_i3_dest, 0);
3642
3643 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3644 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3645 || GET_CODE (new_i2_dest) == SUBREG)
3646 new_i2_dest = XEXP (new_i2_dest, 0);
3647
3648 if (REG_P (new_i3_dest)
3649 && REG_P (new_i2_dest)
3650 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3651 && REGNO (new_i2_dest) < reg_n_sets_max)
3652 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3653 }
3654 }
3655
3656 /* If we can split it and use I2DEST, go ahead and see if that
3657 helps things be recognized. Verify that none of the registers
3658 are set between I2 and I3. */
3659 if (insn_code_number < 0
3660 && (split = find_split_point (&newpat, i3, false)) != 0
3661 && (!HAVE_cc0 || REG_P (i2dest))
3662 /* We need I2DEST in the proper mode. If it is a hard register
3663 or the only use of a pseudo, we can change its mode.
3664 Make sure we don't change a hard register to have a mode that
3665 isn't valid for it, or change the number of registers. */
3666 && (GET_MODE (*split) == GET_MODE (i2dest)
3667 || GET_MODE (*split) == VOIDmode
3668 || can_change_dest_mode (i2dest, added_sets_2,
3669 GET_MODE (*split)))
3670 && (next_nonnote_nondebug_insn (i2) == i3
3671 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3672 /* We can't overwrite I2DEST if its value is still used by
3673 NEWPAT. */
3674 && ! reg_referenced_p (i2dest, newpat))
3675 {
3676 rtx newdest = i2dest;
3677 enum rtx_code split_code = GET_CODE (*split);
3678 machine_mode split_mode = GET_MODE (*split);
3679 bool subst_done = false;
3680 newi2pat = NULL_RTX;
3681
3682 i2scratch = true;
3683
3684 /* *SPLIT may be part of I2SRC, so make sure we have the
3685 original expression around for later debug processing.
3686 We should not need I2SRC any more in other cases. */
3687 if (MAY_HAVE_DEBUG_INSNS)
3688 i2src = copy_rtx (i2src);
3689 else
3690 i2src = NULL;
3691
3692 /* Get NEWDEST as a register in the proper mode. We have already
3693 validated that we can do this. */
3694 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3695 {
3696 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3697 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3698 else
3699 {
3700 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3701 newdest = regno_reg_rtx[REGNO (i2dest)];
3702 }
3703 }
3704
3705 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3706 an ASHIFT. This can occur if it was inside a PLUS and hence
3707 appeared to be a memory address. This is a kludge. */
3708 if (split_code == MULT
3709 && CONST_INT_P (XEXP (*split, 1))
3710 && INTVAL (XEXP (*split, 1)) > 0
3711 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3712 {
3713 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3714 XEXP (*split, 0), GEN_INT (i)));
3715 /* Update split_code because we may not have a multiply
3716 anymore. */
3717 split_code = GET_CODE (*split);
3718 }
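	  /* For example, (mult:SI (reg:SI 100) (const_int 8)) is rewritten
	     here as (ashift:SI (reg:SI 100) (const_int 3)); the register
	     number is illustrative only.  */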
3719
3720 /* Similarly for (plus (mult FOO (const_int pow2))). */
3721 if (split_code == PLUS
3722 && GET_CODE (XEXP (*split, 0)) == MULT
3723 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3724 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3725 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3726 {
3727 rtx nsplit = XEXP (*split, 0);
3728 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3729 XEXP (nsplit, 0), GEN_INT (i)));
3730 /* Update split_code because we may not have a multiply
3731 anymore. */
3732 split_code = GET_CODE (*split);
3733 }
3734
3735 #ifdef INSN_SCHEDULING
3736 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3737 be written as a ZERO_EXTEND. */
3738 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3739 {
3740 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3741 what it really is. */
3742 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3743 == SIGN_EXTEND)
3744 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3745 SUBREG_REG (*split)));
3746 else
3747 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3748 SUBREG_REG (*split)));
3749 }
3750 #endif
3751
3752 /* Attempt to split binary operators using arithmetic identities. */
3753 if (BINARY_P (SET_SRC (newpat))
3754 && split_mode == GET_MODE (SET_SRC (newpat))
3755 && ! side_effects_p (SET_SRC (newpat)))
3756 {
3757 rtx setsrc = SET_SRC (newpat);
3758 machine_mode mode = GET_MODE (setsrc);
3759 enum rtx_code code = GET_CODE (setsrc);
3760 rtx src_op0 = XEXP (setsrc, 0);
3761 rtx src_op1 = XEXP (setsrc, 1);
3762
3763 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3764 if (rtx_equal_p (src_op0, src_op1))
3765 {
3766 newi2pat = gen_rtx_SET (newdest, src_op0);
3767 SUBST (XEXP (setsrc, 0), newdest);
3768 SUBST (XEXP (setsrc, 1), newdest);
3769 subst_done = true;
3770 }
3771 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3772 else if ((code == PLUS || code == MULT)
3773 && GET_CODE (src_op0) == code
3774 && GET_CODE (XEXP (src_op0, 0)) == code
3775 && (INTEGRAL_MODE_P (mode)
3776 || (FLOAT_MODE_P (mode)
3777 && flag_unsafe_math_optimizations)))
3778 {
3779 rtx p = XEXP (XEXP (src_op0, 0), 0);
3780 rtx q = XEXP (XEXP (src_op0, 0), 1);
3781 rtx r = XEXP (src_op0, 1);
3782 rtx s = src_op1;
3783
3784 /* Split both "((X op Y) op X) op Y" and
3785 "((X op Y) op Y) op X" as "T op T" where T is
3786 "X op Y". */
3787 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3788 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3789 {
3790 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3791 SUBST (XEXP (setsrc, 0), newdest);
3792 SUBST (XEXP (setsrc, 1), newdest);
3793 subst_done = true;
3794 }
3795 /* Split "((X op X) op Y) op Y" as "T op T" where
3796 T is "X op Y". */
3797 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3798 {
3799 rtx tmp = simplify_gen_binary (code, mode, p, r);
3800 newi2pat = gen_rtx_SET (newdest, tmp);
3801 SUBST (XEXP (setsrc, 0), newdest);
3802 SUBST (XEXP (setsrc, 1), newdest);
3803 subst_done = true;
3804 }
3805 }
3806 }
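	  /* Sketch of the simplest identity above: for
	       (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 101)))
	     NEWI2PAT becomes (set NEWDEST (reg:SI 101)), NEWDEST being I2DEST
	     possibly in a new mode, and NEWPAT ends up as
	       (set (reg:SI 100) (plus:SI NEWDEST NEWDEST)).
	     Register numbers are illustrative only.  */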
3807
3808 if (!subst_done)
3809 {
3810 newi2pat = gen_rtx_SET (newdest, *split);
3811 SUBST (*split, newdest);
3812 }
3813
3814 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3815
3816 /* recog_for_combine might have added CLOBBERs to newi2pat.
3817 Make sure NEWPAT does not depend on the clobbered regs. */
3818 if (GET_CODE (newi2pat) == PARALLEL)
3819 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3820 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3821 {
3822 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3823 if (reg_overlap_mentioned_p (reg, newpat))
3824 {
3825 undo_all ();
3826 return 0;
3827 }
3828 }
3829
3830 /* If the split point was a MULT and we didn't have one before,
3831 don't use one now. */
3832 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3833 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3834 }
3835 }
3836
3837 /* Check for a case where we loaded from memory in a narrow mode and
3838 then sign extended it, but we need both registers. In that case,
3839 we have a PARALLEL with both loads from the same memory location.
3840 We can split this into a load from memory followed by a register-register
3841 copy. This saves at least one insn, more if register allocation can
3842 eliminate the copy.
3843
3844 We cannot do this if the destination of the first assignment is a
3845 condition code register or cc0. We eliminate this case by making sure
3846 the SET_DEST and SET_SRC have the same mode.
3847
3848 We cannot do this if the destination of the second assignment is
3849 a register that we have already assumed is zero-extended. Similarly
3850 for a SUBREG of such a register. */
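     /* A rough sketch of the shape handled here (registers and the memory
	address A are illustrative):
	  (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI A)))
		     (set (reg:HI 101) (mem:HI A))])
	becomes NEWI2PAT = (set (reg:SI 100) (sign_extend:SI (mem:HI A)))
	while NEWPAT copies the low part of (reg:SI 100) into (reg:HI 101).  */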
3851
3852 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3853 && GET_CODE (newpat) == PARALLEL
3854 && XVECLEN (newpat, 0) == 2
3855 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3856 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3857 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3858 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3859 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3860 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3861 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3862 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3863 DF_INSN_LUID (i2))
3864 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3865 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3866 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3867 (REG_P (temp_expr)
3868 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3869 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3870 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3871 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3872 != GET_MODE_MASK (word_mode))))
3873 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3874 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3875 (REG_P (temp_expr)
3876 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3877 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3878 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3879 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3880 != GET_MODE_MASK (word_mode)))))
3881 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3882 SET_SRC (XVECEXP (newpat, 0, 1)))
3883 && ! find_reg_note (i3, REG_UNUSED,
3884 SET_DEST (XVECEXP (newpat, 0, 0))))
3885 {
3886 rtx ni2dest;
3887
3888 newi2pat = XVECEXP (newpat, 0, 0);
3889 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3890 newpat = XVECEXP (newpat, 0, 1);
3891 SUBST (SET_SRC (newpat),
3892 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3893 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3894
3895 if (i2_code_number >= 0)
3896 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3897
3898 if (insn_code_number >= 0)
3899 swap_i2i3 = 1;
3900 }
3901
3902 /* Similarly, check for a case where we have a PARALLEL of two independent
3903 SETs but we started with three insns. In this case, we can do the sets
3904 as two separate insns. This case occurs when some SET allows two
3905 other insns to combine, but the destination of that SET is still live.
3906
3907 Also do this if we started with two insns and (at least) one of the
3908 resulting sets is a noop; this noop will be deleted later. */
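  /* Illustrative only: NEWPAT here might look like
       (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
		  (set (reg:SI 102) (neg:SI (reg:SI 103)))])
     with neither SET mentioning the other's destination; the code below
     moves one of them into NEWI2PAT and keeps the other as NEWPAT.  */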
3909
3910 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3911 && GET_CODE (newpat) == PARALLEL
3912 && XVECLEN (newpat, 0) == 2
3913 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3914 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3915 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3916 || set_noop_p (XVECEXP (newpat, 0, 1)))
3917 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3918 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3919 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3920 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3921 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3922 XVECEXP (newpat, 0, 0))
3923 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3924 XVECEXP (newpat, 0, 1))
3925 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3926 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3927 {
3928 rtx set0 = XVECEXP (newpat, 0, 0);
3929 rtx set1 = XVECEXP (newpat, 0, 1);
3930
3931 /* Normally, it doesn't matter which of the two is done first,
3932 but the one that references cc0 can't be the second, and
3933 one which uses any regs/memory set in between i2 and i3 can't
3934 be first. The PARALLEL might also have been pre-existing in i3,
3935 so we need to make sure that we won't wrongly hoist a SET to i2
3936 that would conflict with a death note present in there. */
3937 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3938 && !(REG_P (SET_DEST (set1))
3939 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3940 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3941 && find_reg_note (i2, REG_DEAD,
3942 SUBREG_REG (SET_DEST (set1))))
3943 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3944 /* If I3 is a jump, ensure that set0 is a jump so that
3945 we do not create invalid RTL. */
3946 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3947 )
3948 {
3949 newi2pat = set1;
3950 newpat = set0;
3951 }
3952 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3953 && !(REG_P (SET_DEST (set0))
3954 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3955 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3956 && find_reg_note (i2, REG_DEAD,
3957 SUBREG_REG (SET_DEST (set0))))
3958 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
3959 /* If I3 is a jump, ensure that set1 is a jump so that
3960 we do not create invalid RTL. */
3961 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3962 )
3963 {
3964 newi2pat = set0;
3965 newpat = set1;
3966 }
3967 else
3968 {
3969 undo_all ();
3970 return 0;
3971 }
3972
3973 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3974
3975 if (i2_code_number >= 0)
3976 {
3977 /* recog_for_combine might have added CLOBBERs to newi2pat.
3978 Make sure NEWPAT does not depend on the clobbered regs. */
3979 if (GET_CODE (newi2pat) == PARALLEL)
3980 {
3981 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3982 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3983 {
3984 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3985 if (reg_overlap_mentioned_p (reg, newpat))
3986 {
3987 undo_all ();
3988 return 0;
3989 }
3990 }
3991 }
3992
3993 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3994 }
3995 }
3996
3997 /* If it still isn't recognized, fail and change things back the way they
3998 were. */
3999 if ((insn_code_number < 0
4000 /* Is the result a reasonable ASM_OPERANDS? */
4001 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4002 {
4003 undo_all ();
4004 return 0;
4005 }
4006
4007 /* If we had to change another insn, make sure it is valid also. */
4008 if (undobuf.other_insn)
4009 {
4010 CLEAR_HARD_REG_SET (newpat_used_regs);
4011
4012 other_pat = PATTERN (undobuf.other_insn);
4013 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4014 &new_other_notes);
4015
4016 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4017 {
4018 undo_all ();
4019 return 0;
4020 }
4021 }
4022
4023 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4024 they are adjacent to each other or not. */
4025 if (HAVE_cc0)
4026 {
4027 rtx_insn *p = prev_nonnote_insn (i3);
4028 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4029 && sets_cc0_p (newi2pat))
4030 {
4031 undo_all ();
4032 return 0;
4033 }
4034 }
4035
4036 /* Only allow this combination if insn_rtx_costs reports that the
4037 replacement instructions are cheaper than the originals. */
4038 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4039 {
4040 undo_all ();
4041 return 0;
4042 }
4043
4044 if (MAY_HAVE_DEBUG_INSNS)
4045 {
4046 struct undo *undo;
4047
4048 for (undo = undobuf.undos; undo; undo = undo->next)
4049 if (undo->kind == UNDO_MODE)
4050 {
4051 rtx reg = *undo->where.r;
4052 machine_mode new_mode = GET_MODE (reg);
4053 machine_mode old_mode = undo->old_contents.m;
4054
4055 /* Temporarily revert mode back. */
4056 adjust_reg_mode (reg, old_mode);
4057
4058 if (reg == i2dest && i2scratch)
4059 {
4060 /* If we used i2dest as a scratch register with a
4061 different mode, substitute it for the original
4062 i2src while its original mode is temporarily
4063 restored, and then clear i2scratch so that we don't
4064 do it again later. */
4065 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4066 this_basic_block);
4067 i2scratch = false;
4068 /* Put back the new mode. */
4069 adjust_reg_mode (reg, new_mode);
4070 }
4071 else
4072 {
4073 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4074 rtx_insn *first, *last;
4075
4076 if (reg == i2dest)
4077 {
4078 first = i2;
4079 last = last_combined_insn;
4080 }
4081 else
4082 {
4083 first = i3;
4084 last = undobuf.other_insn;
4085 gcc_assert (last);
4086 if (DF_INSN_LUID (last)
4087 < DF_INSN_LUID (last_combined_insn))
4088 last = last_combined_insn;
4089 }
4090
4091 /* We're dealing with a reg that changed mode but not
4092 meaning, so we want to turn it into a subreg for
4093 the new mode. However, because of REG sharing and
4094 because its mode had already changed, we have to do
4095 it in two steps. First, replace any debug uses of
4096 reg, with its original mode temporarily restored,
4097 with this copy we have created; then, replace the
4098 copy with the SUBREG of the original shared reg,
4099 once again changed to the new mode. */
4100 propagate_for_debug (first, last, reg, tempreg,
4101 this_basic_block);
4102 adjust_reg_mode (reg, new_mode);
4103 propagate_for_debug (first, last, tempreg,
4104 lowpart_subreg (old_mode, reg, new_mode),
4105 this_basic_block);
4106 }
4107 }
4108 }
4109
4110 /* If we will be able to accept this, we have made a
4111 change to the destination of I3. This requires us to
4112 do a few adjustments. */
4113
4114 if (changed_i3_dest)
4115 {
4116 PATTERN (i3) = newpat;
4117 adjust_for_new_dest (i3);
4118 }
4119
4120 /* We now know that we can do this combination. Merge the insns and
4121 update the status of registers and LOG_LINKS. */
4122
4123 if (undobuf.other_insn)
4124 {
4125 rtx note, next;
4126
4127 PATTERN (undobuf.other_insn) = other_pat;
4128
4129 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4130 ensure that they are still valid. Then add any non-duplicate
4131 notes added by recog_for_combine. */
4132 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4133 {
4134 next = XEXP (note, 1);
4135
4136 if ((REG_NOTE_KIND (note) == REG_DEAD
4137 && !reg_referenced_p (XEXP (note, 0),
4138 PATTERN (undobuf.other_insn)))
4139 || (REG_NOTE_KIND (note) == REG_UNUSED
4140 && !reg_set_p (XEXP (note, 0),
4141 PATTERN (undobuf.other_insn))))
4142 remove_note (undobuf.other_insn, note);
4143 }
4144
4145 distribute_notes (new_other_notes, undobuf.other_insn,
4146 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4147 NULL_RTX);
4148 }
4149
4150 if (swap_i2i3)
4151 {
4152 rtx_insn *insn;
4153 struct insn_link *link;
4154 rtx ni2dest;
4155
4156 /* I3 now uses what used to be its destination and which is now
4157 I2's destination. This requires us to do a few adjustments. */
4158 PATTERN (i3) = newpat;
4159 adjust_for_new_dest (i3);
4160
4161 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4162 so we still will.
4163
4164 However, some later insn might be using I2's dest and have
4165 a LOG_LINK pointing at I3. We must remove this link.
4166 The simplest way to remove the link is to point it at I1,
4167 which we know will be a NOTE. */
4168
4169 /* newi2pat is usually a SET here; however, recog_for_combine might
4170 have added some clobbers. */
4171 if (GET_CODE (newi2pat) == PARALLEL)
4172 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4173 else
4174 ni2dest = SET_DEST (newi2pat);
4175
4176 for (insn = NEXT_INSN (i3);
4177 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4178 || insn != BB_HEAD (this_basic_block->next_bb));
4179 insn = NEXT_INSN (insn))
4180 {
4181 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
4182 {
4183 FOR_EACH_LOG_LINK (link, insn)
4184 if (link->insn == i3)
4185 link->insn = i1;
4186
4187 break;
4188 }
4189 }
4190 }
4191
4192 {
4193 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4194 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4195 rtx midnotes = 0;
4196 int from_luid;
4197 /* Compute which registers we expect to eliminate. newi2pat may be setting
4198 either i3dest or i2dest, so we must check it. */
4199 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4200 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4201 || !i2dest_killed
4202 ? 0 : i2dest);
4203 /* For i1, we need to compute both local elimination and global
4204 elimination information with respect to newi2pat because i1dest
4205 may be the same as i3dest, in which case newi2pat may be setting
4206 i1dest. Global information is used when distributing REG_DEAD
4207 note for i2 and i3, in which case it does matter if newi2pat sets
4208 i1dest or not.
4209
4210 Local information is used when distributing REG_DEAD note for i1,
4211 in which case it doesn't matter if newi2pat sets i1dest or not.
4212 See PR62151, if we have four insns combination:
4213 i0: r0 <- i0src
4214 i1: r1 <- i1src (using r0)
4215 REG_DEAD (r0)
4216 i2: r0 <- i2src (using r1)
4217 i3: r3 <- i3src (using r0)
4218 ix: using r0
4219 From i1's point of view, r0 is eliminated, no matter if it is set
4220 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4221 should be discarded.
4222
4223 Note local information only affects cases in forms like "I1->I2->I3",
4224 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4225 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4226 i0dest anyway. */
4227 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4228 || !i1dest_killed
4229 ? 0 : i1dest);
4230 rtx elim_i1 = (local_elim_i1 == 0
4231 || (newi2pat && reg_set_p (i1dest, newi2pat))
4232 ? 0 : i1dest);
4233 /* Same case as i1. */
4234 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4235 ? 0 : i0dest);
4236 rtx elim_i0 = (local_elim_i0 == 0
4237 || (newi2pat && reg_set_p (i0dest, newi2pat))
4238 ? 0 : i0dest);
4239
4240 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4241 clear them. */
4242 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4243 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4244 if (i1)
4245 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4246 if (i0)
4247 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4248
4249 /* Ensure that we do not have something that should not be shared but
4250 occurs multiple times in the new insns. Check this by first
4251 resetting all the `used' flags and then copying anything that is shared.  */
4252
4253 reset_used_flags (i3notes);
4254 reset_used_flags (i2notes);
4255 reset_used_flags (i1notes);
4256 reset_used_flags (i0notes);
4257 reset_used_flags (newpat);
4258 reset_used_flags (newi2pat);
4259 if (undobuf.other_insn)
4260 reset_used_flags (PATTERN (undobuf.other_insn));
4261
4262 i3notes = copy_rtx_if_shared (i3notes);
4263 i2notes = copy_rtx_if_shared (i2notes);
4264 i1notes = copy_rtx_if_shared (i1notes);
4265 i0notes = copy_rtx_if_shared (i0notes);
4266 newpat = copy_rtx_if_shared (newpat);
4267 newi2pat = copy_rtx_if_shared (newi2pat);
4268 if (undobuf.other_insn)
4269 reset_used_flags (PATTERN (undobuf.other_insn));
4270
4271 INSN_CODE (i3) = insn_code_number;
4272 PATTERN (i3) = newpat;
4273
4274 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4275 {
4276 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4277
4278 reset_used_flags (call_usage);
4279 call_usage = copy_rtx (call_usage);
4280
4281 if (substed_i2)
4282 {
4283 /* I2SRC must still be meaningful at this point. Some splitting
4284 operations can invalidate I2SRC, but those operations do not
4285 apply to calls. */
4286 gcc_assert (i2src);
4287 replace_rtx (call_usage, i2dest, i2src);
4288 }
4289
4290 if (substed_i1)
4291 replace_rtx (call_usage, i1dest, i1src);
4292 if (substed_i0)
4293 replace_rtx (call_usage, i0dest, i0src);
4294
4295 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4296 }
4297
4298 if (undobuf.other_insn)
4299 INSN_CODE (undobuf.other_insn) = other_code_number;
4300
4301 /* We had one special case above where I2 had more than one set and
4302 we replaced a destination of one of those sets with the destination
4303 of I3. In that case, we have to update LOG_LINKS of insns later
4304 in this basic block. Note that this (expensive) case is rare.
4305
4306 Also, in this case, we must pretend that all REG_NOTEs for I2
4307 actually came from I3, so that REG_UNUSED notes from I2 will be
4308 properly handled. */
4309
4310 if (i3_subst_into_i2)
4311 {
4312 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4313 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4314 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4315 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4316 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4317 && ! find_reg_note (i2, REG_UNUSED,
4318 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4319 for (temp_insn = NEXT_INSN (i2);
4320 temp_insn
4321 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4322 || BB_HEAD (this_basic_block) != temp_insn);
4323 temp_insn = NEXT_INSN (temp_insn))
4324 if (temp_insn != i3 && INSN_P (temp_insn))
4325 FOR_EACH_LOG_LINK (link, temp_insn)
4326 if (link->insn == i2)
4327 link->insn = i3;
4328
4329 if (i3notes)
4330 {
4331 rtx link = i3notes;
4332 while (XEXP (link, 1))
4333 link = XEXP (link, 1);
4334 XEXP (link, 1) = i2notes;
4335 }
4336 else
4337 i3notes = i2notes;
4338 i2notes = 0;
4339 }
4340
4341 LOG_LINKS (i3) = NULL;
4342 REG_NOTES (i3) = 0;
4343 LOG_LINKS (i2) = NULL;
4344 REG_NOTES (i2) = 0;
4345
4346 if (newi2pat)
4347 {
4348 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4349 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4350 this_basic_block);
4351 INSN_CODE (i2) = i2_code_number;
4352 PATTERN (i2) = newi2pat;
4353 }
4354 else
4355 {
4356 if (MAY_HAVE_DEBUG_INSNS && i2src)
4357 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4358 this_basic_block);
4359 SET_INSN_DELETED (i2);
4360 }
4361
4362 if (i1)
4363 {
4364 LOG_LINKS (i1) = NULL;
4365 REG_NOTES (i1) = 0;
4366 if (MAY_HAVE_DEBUG_INSNS)
4367 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4368 this_basic_block);
4369 SET_INSN_DELETED (i1);
4370 }
4371
4372 if (i0)
4373 {
4374 LOG_LINKS (i0) = NULL;
4375 REG_NOTES (i0) = 0;
4376 if (MAY_HAVE_DEBUG_INSNS)
4377 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4378 this_basic_block);
4379 SET_INSN_DELETED (i0);
4380 }
4381
4382 /* Get death notes for everything that is now used in either I3 or
4383 I2 and used to die in a previous insn. If we built two new
4384 patterns, move from I1 to I2 then I2 to I3 so that we get the
4385 proper movement on registers that I2 modifies. */
4386
4387 if (i0)
4388 from_luid = DF_INSN_LUID (i0);
4389 else if (i1)
4390 from_luid = DF_INSN_LUID (i1);
4391 else
4392 from_luid = DF_INSN_LUID (i2);
4393 if (newi2pat)
4394 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4395 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4396
4397 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4398 if (i3notes)
4399 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4400 elim_i2, elim_i1, elim_i0);
4401 if (i2notes)
4402 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4403 elim_i2, elim_i1, elim_i0);
4404 if (i1notes)
4405 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4406 elim_i2, local_elim_i1, local_elim_i0);
4407 if (i0notes)
4408 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4409 elim_i2, elim_i1, local_elim_i0);
4410 if (midnotes)
4411 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4412 elim_i2, elim_i1, elim_i0);
4413
4414 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4415 know these are REG_UNUSED and want them to go to the desired insn,
4416 so we always pass it as i3. */
4417
4418 if (newi2pat && new_i2_notes)
4419 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4420 NULL_RTX);
4421
4422 if (new_i3_notes)
4423 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4424 NULL_RTX);
4425
4426 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4427 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4428 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4429 in that case, it might delete I2. Similarly for I2 and I1.
4430 Show an additional death due to the REG_DEAD note we make here. If
4431 we discard it in distribute_notes, we will decrement it again. */
4432
4433 if (i3dest_killed)
4434 {
4435 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4436 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4437 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4438 elim_i1, elim_i0);
4439 else
4440 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4441 elim_i2, elim_i1, elim_i0);
4442 }
4443
4444 if (i2dest_in_i2src)
4445 {
4446 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4447 if (newi2pat && reg_set_p (i2dest, newi2pat))
4448 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4449 NULL_RTX, NULL_RTX);
4450 else
4451 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4452 NULL_RTX, NULL_RTX, NULL_RTX);
4453 }
4454
4455 if (i1dest_in_i1src)
4456 {
4457 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4458 if (newi2pat && reg_set_p (i1dest, newi2pat))
4459 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4460 NULL_RTX, NULL_RTX);
4461 else
4462 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4463 NULL_RTX, NULL_RTX, NULL_RTX);
4464 }
4465
4466 if (i0dest_in_i0src)
4467 {
4468 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4469 if (newi2pat && reg_set_p (i0dest, newi2pat))
4470 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4471 NULL_RTX, NULL_RTX);
4472 else
4473 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4474 NULL_RTX, NULL_RTX, NULL_RTX);
4475 }
4476
4477 distribute_links (i3links);
4478 distribute_links (i2links);
4479 distribute_links (i1links);
4480 distribute_links (i0links);
4481
4482 if (REG_P (i2dest))
4483 {
4484 struct insn_link *link;
4485 rtx_insn *i2_insn = 0;
4486 rtx i2_val = 0, set;
4487
4488 /* The insn that used to set this register doesn't exist, and
4489 this life of the register may not exist either. See if one of
4490 I3's links points to an insn that sets I2DEST. If it does,
4491 that is now the last known value for I2DEST. If we don't update
4492 this and I2 set the register to a value that depended on its old
4493 contents, we will get confused. If this insn is used, things
4494 will be set correctly in combine_instructions. */
4495 FOR_EACH_LOG_LINK (link, i3)
4496 if ((set = single_set (link->insn)) != 0
4497 && rtx_equal_p (i2dest, SET_DEST (set)))
4498 i2_insn = link->insn, i2_val = SET_SRC (set);
4499
4500 record_value_for_reg (i2dest, i2_insn, i2_val);
4501
4502 /* If the reg formerly set in I2 died only once and that was in I3,
4503 zero its use count so it won't make `reload' do any work. */
4504 if (! added_sets_2
4505 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4506 && ! i2dest_in_i2src
4507 && REGNO (i2dest) < reg_n_sets_max)
4508 INC_REG_N_SETS (REGNO (i2dest), -1);
4509 }
4510
4511 if (i1 && REG_P (i1dest))
4512 {
4513 struct insn_link *link;
4514 rtx_insn *i1_insn = 0;
4515 rtx i1_val = 0, set;
4516
4517 FOR_EACH_LOG_LINK (link, i3)
4518 if ((set = single_set (link->insn)) != 0
4519 && rtx_equal_p (i1dest, SET_DEST (set)))
4520 i1_insn = link->insn, i1_val = SET_SRC (set);
4521
4522 record_value_for_reg (i1dest, i1_insn, i1_val);
4523
4524 if (! added_sets_1
4525 && ! i1dest_in_i1src
4526 && REGNO (i1dest) < reg_n_sets_max)
4527 INC_REG_N_SETS (REGNO (i1dest), -1);
4528 }
4529
4530 if (i0 && REG_P (i0dest))
4531 {
4532 struct insn_link *link;
4533 rtx_insn *i0_insn = 0;
4534 rtx i0_val = 0, set;
4535
4536 FOR_EACH_LOG_LINK (link, i3)
4537 if ((set = single_set (link->insn)) != 0
4538 && rtx_equal_p (i0dest, SET_DEST (set)))
4539 i0_insn = link->insn, i0_val = SET_SRC (set);
4540
4541 record_value_for_reg (i0dest, i0_insn, i0_val);
4542
4543 if (! added_sets_0
4544 && ! i0dest_in_i0src
4545 && REGNO (i0dest) < reg_n_sets_max)
4546 INC_REG_N_SETS (REGNO (i0dest), -1);
4547 }
4548
4549 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4550 been made to this insn. The order is important, because newi2pat
4551 can affect nonzero_bits of newpat. */
4552 if (newi2pat)
4553 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4554 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4555 }
4556
4557 if (undobuf.other_insn != NULL_RTX)
4558 {
4559 if (dump_file)
4560 {
4561 fprintf (dump_file, "modifying other_insn ");
4562 dump_insn_slim (dump_file, undobuf.other_insn);
4563 }
4564 df_insn_rescan (undobuf.other_insn);
4565 }
4566
4567 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4568 {
4569 if (dump_file)
4570 {
4571 fprintf (dump_file, "modifying insn i0 ");
4572 dump_insn_slim (dump_file, i0);
4573 }
4574 df_insn_rescan (i0);
4575 }
4576
4577 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4578 {
4579 if (dump_file)
4580 {
4581 fprintf (dump_file, "modifying insn i1 ");
4582 dump_insn_slim (dump_file, i1);
4583 }
4584 df_insn_rescan (i1);
4585 }
4586
4587 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4588 {
4589 if (dump_file)
4590 {
4591 fprintf (dump_file, "modifying insn i2 ");
4592 dump_insn_slim (dump_file, i2);
4593 }
4594 df_insn_rescan (i2);
4595 }
4596
4597 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4598 {
4599 if (dump_file)
4600 {
4601 fprintf (dump_file, "modifying insn i3 ");
4602 dump_insn_slim (dump_file, i3);
4603 }
4604 df_insn_rescan (i3);
4605 }
4606
4607 /* Set new_direct_jump_p if a new return or simple jump instruction
4608 has been created. Adjust the CFG accordingly. */
4609 if (returnjump_p (i3) || any_uncondjump_p (i3))
4610 {
4611 *new_direct_jump_p = 1;
4612 mark_jump_label (PATTERN (i3), i3, 0);
4613 update_cfg_for_uncondjump (i3);
4614 }
4615
4616 if (undobuf.other_insn != NULL_RTX
4617 && (returnjump_p (undobuf.other_insn)
4618 || any_uncondjump_p (undobuf.other_insn)))
4619 {
4620 *new_direct_jump_p = 1;
4621 update_cfg_for_uncondjump (undobuf.other_insn);
4622 }
4623
4624 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4625 && XEXP (PATTERN (i3), 0) == const1_rtx)
4626 {
4627 basic_block bb = BLOCK_FOR_INSN (i3);
4628 gcc_assert (bb);
4629 remove_edge (split_block (bb, i3));
4630 emit_barrier_after_bb (bb);
4631 *new_direct_jump_p = 1;
4632 }
4633
4634 if (undobuf.other_insn
4635 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4636 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4637 {
4638 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4639 gcc_assert (bb);
4640 remove_edge (split_block (bb, undobuf.other_insn));
4641 emit_barrier_after_bb (bb);
4642 *new_direct_jump_p = 1;
4643 }
4644
4645 /* A noop might also need cleaning up of CFG, if it comes from the
4646 simplification of a jump. */
4647 if (JUMP_P (i3)
4648 && GET_CODE (newpat) == SET
4649 && SET_SRC (newpat) == pc_rtx
4650 && SET_DEST (newpat) == pc_rtx)
4651 {
4652 *new_direct_jump_p = 1;
4653 update_cfg_for_uncondjump (i3);
4654 }
4655
4656 if (undobuf.other_insn != NULL_RTX
4657 && JUMP_P (undobuf.other_insn)
4658 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4659 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4660 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4661 {
4662 *new_direct_jump_p = 1;
4663 update_cfg_for_uncondjump (undobuf.other_insn);
4664 }
4665
4666 combine_successes++;
4667 undo_commit ();
4668
4669 if (added_links_insn
4670 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4671 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4672 return added_links_insn;
4673 else
4674 return newi2pat ? i2 : i3;
4675 }
4676 \f
4677 /* Get a marker for undoing to the current state. */
4678
4679 static void *
4680 get_undo_marker (void)
4681 {
4682 return undobuf.undos;
4683 }
4684
4685 /* Undo the modifications up to the marker. */
4686
4687 static void
4688 undo_to_marker (void *marker)
4689 {
4690 struct undo *undo, *next;
4691
4692 for (undo = undobuf.undos; undo != marker; undo = next)
4693 {
4694 gcc_assert (undo);
4695
4696 next = undo->next;
4697 switch (undo->kind)
4698 {
4699 case UNDO_RTX:
4700 *undo->where.r = undo->old_contents.r;
4701 break;
4702 case UNDO_INT:
4703 *undo->where.i = undo->old_contents.i;
4704 break;
4705 case UNDO_MODE:
4706 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4707 break;
4708 case UNDO_LINKS:
4709 *undo->where.l = undo->old_contents.l;
4710 break;
4711 default:
4712 gcc_unreachable ();
4713 }
4714
4715 undo->next = undobuf.frees;
4716 undobuf.frees = undo;
4717 }
4718
4719 undobuf.undos = (struct undo *) marker;
4720 }
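/* A hedged sketch of how the two functions above fit together:

     void *marker = get_undo_marker ();
     SUBST (XEXP (x, 0), new_rtx);     -- tentative change
     if (change_turned_out_bad)        -- hypothetical condition
       undo_to_marker (marker);        -- roll back only the changes made
					  since the marker was taken  */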
4721
4722 /* Undo all the modifications recorded in undobuf. */
4723
4724 static void
4725 undo_all (void)
4726 {
4727 undo_to_marker (0);
4728 }
4729
4730 /* We've committed to accepting the changes we made. Move all
4731 of the undos to the free list. */
4732
4733 static void
4734 undo_commit (void)
4735 {
4736 struct undo *undo, *next;
4737
4738 for (undo = undobuf.undos; undo; undo = next)
4739 {
4740 next = undo->next;
4741 undo->next = undobuf.frees;
4742 undobuf.frees = undo;
4743 }
4744 undobuf.undos = 0;
4745 }
4746 \f
4747 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4748 where we have an arithmetic expression and return that point. LOC will
4749 be inside INSN.
4750
4751 try_combine will call this function to see if an insn can be split into
4752 two insns. */
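/* For instance (a loose sketch, not tied to any target), given
     (set (reg:SI 100) (plus:SI (mult:SI (reg:SI 101) (reg:SI 102))
				(reg:SI 103)))
   the inner MULT is a natural split point: try_combine can compute it in a
   new I2 and leave a simple PLUS for I3.  */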
4753
4754 static rtx *
4755 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4756 {
4757 rtx x = *loc;
4758 enum rtx_code code = GET_CODE (x);
4759 rtx *split;
4760 unsigned HOST_WIDE_INT len = 0;
4761 HOST_WIDE_INT pos = 0;
4762 int unsignedp = 0;
4763 rtx inner = NULL_RTX;
4764
4765 /* First special-case some codes. */
4766 switch (code)
4767 {
4768 case SUBREG:
4769 #ifdef INSN_SCHEDULING
4770 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4771 point. */
4772 if (MEM_P (SUBREG_REG (x)))
4773 return loc;
4774 #endif
4775 return find_split_point (&SUBREG_REG (x), insn, false);
4776
4777 case MEM:
4778 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4779 using LO_SUM and HIGH. */
4780 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4781 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4782 {
4783 machine_mode address_mode = get_address_mode (x);
4784
4785 SUBST (XEXP (x, 0),
4786 gen_rtx_LO_SUM (address_mode,
4787 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4788 XEXP (x, 0)));
4789 return &XEXP (XEXP (x, 0), 0);
4790 }
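      /* E.g., on a target with HAVE_lo_sum (sketch only),
	   (mem:SI (symbol_ref:SI ("x")))
	 is rewritten as
	   (mem:SI (lo_sum:SI (high:SI (symbol_ref:SI ("x")))
			      (symbol_ref:SI ("x"))))
	 and the HIGH part, returned as the split point, can then be set up
	 by a separate insn.  */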
4791
4792 /* If we have a PLUS whose second operand is a constant and the
4793 address is not valid, perhaps we can split it up using
4794 the machine-specific way to split large constants. We use
4795 the first pseudo-reg (one of the virtual regs) as a placeholder;
4796 it will not remain in the result. */
4797 if (GET_CODE (XEXP (x, 0)) == PLUS
4798 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4799 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4800 MEM_ADDR_SPACE (x)))
4801 {
4802 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4803 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4804 subst_insn);
4805
4806 /* This should have produced two insns, each of which sets our
4807 placeholder. If the source of the second is a valid address,
4808 we can put both sources together and make a split point
4809 in the middle. */
4810
4811 if (seq
4812 && NEXT_INSN (seq) != NULL_RTX
4813 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4814 && NONJUMP_INSN_P (seq)
4815 && GET_CODE (PATTERN (seq)) == SET
4816 && SET_DEST (PATTERN (seq)) == reg
4817 && ! reg_mentioned_p (reg,
4818 SET_SRC (PATTERN (seq)))
4819 && NONJUMP_INSN_P (NEXT_INSN (seq))
4820 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4821 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4822 && memory_address_addr_space_p
4823 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4824 MEM_ADDR_SPACE (x)))
4825 {
4826 rtx src1 = SET_SRC (PATTERN (seq));
4827 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4828
4829 /* Replace the placeholder in SRC2 with SRC1. If we can
4830 find where in SRC2 it was placed, that can become our
4831 split point and we can replace this address with SRC2.
4832 Just try two obvious places. */
4833
4834 src2 = replace_rtx (src2, reg, src1);
4835 split = 0;
4836 if (XEXP (src2, 0) == src1)
4837 split = &XEXP (src2, 0);
4838 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4839 && XEXP (XEXP (src2, 0), 0) == src1)
4840 split = &XEXP (XEXP (src2, 0), 0);
4841
4842 if (split)
4843 {
4844 SUBST (XEXP (x, 0), src2);
4845 return split;
4846 }
4847 }
4848
4849 /* If that didn't work, perhaps the first operand is complex and
4850 needs to be computed separately, so make a split point there.
4851 This will occur on machines that just support REG + CONST
4852 and have a constant moved through some previous computation. */
4853
4854 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4855 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4856 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4857 return &XEXP (XEXP (x, 0), 0);
4858 }
4859
4860 /* If we have a PLUS whose first operand is complex, try computing it
4861 separately by making a split there. */
4862 if (GET_CODE (XEXP (x, 0)) == PLUS
4863 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4864 MEM_ADDR_SPACE (x))
4865 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4866 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4867 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4868 return &XEXP (XEXP (x, 0), 0);
4869 break;
4870
4871 case SET:
4872 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4873 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4874 we need to put the operand into a register. So split at that
4875 point. */
4876
4877 if (SET_DEST (x) == cc0_rtx
4878 && GET_CODE (SET_SRC (x)) != COMPARE
4879 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4880 && !OBJECT_P (SET_SRC (x))
4881 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4882 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4883 return &SET_SRC (x);
4884
4885 /* See if we can split SET_SRC as it stands. */
4886 split = find_split_point (&SET_SRC (x), insn, true);
4887 if (split && split != &SET_SRC (x))
4888 return split;
4889
4890 /* See if we can split SET_DEST as it stands. */
4891 split = find_split_point (&SET_DEST (x), insn, false);
4892 if (split && split != &SET_DEST (x))
4893 return split;
4894
4895 /* See if this is a bitfield assignment with everything constant. If
4896 so, this is an IOR of an AND, so split it into that. */
4897 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4898 && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4899 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4900 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4901 && CONST_INT_P (SET_SRC (x))
4902 && ((INTVAL (XEXP (SET_DEST (x), 1))
4903 + INTVAL (XEXP (SET_DEST (x), 2)))
4904 <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4905 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4906 {
4907 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4908 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4909 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4910 rtx dest = XEXP (SET_DEST (x), 0);
4911 machine_mode mode = GET_MODE (dest);
4912 unsigned HOST_WIDE_INT mask
4913 = (HOST_WIDE_INT_1U << len) - 1;
4914 rtx or_mask;
4915
4916 if (BITS_BIG_ENDIAN)
4917 pos = GET_MODE_PRECISION (mode) - len - pos;
4918
4919 or_mask = gen_int_mode (src << pos, mode);
4920 if (src == mask)
4921 SUBST (SET_SRC (x),
4922 simplify_gen_binary (IOR, mode, dest, or_mask));
4923 else
4924 {
4925 rtx negmask = gen_int_mode (~(mask << pos), mode);
4926 SUBST (SET_SRC (x),
4927 simplify_gen_binary (IOR, mode,
4928 simplify_gen_binary (AND, mode,
4929 dest, negmask),
4930 or_mask));
4931 }
4932
4933 SUBST (SET_DEST (x), dest);
4934
4935 split = find_split_point (&SET_SRC (x), insn, true);
4936 if (split && split != &SET_SRC (x))
4937 return split;
4938 }
4939
4940 /* Otherwise, see if this is an operation that we can split into two.
4941 If so, try to split that. */
4942 code = GET_CODE (SET_SRC (x));
4943
4944 switch (code)
4945 {
4946 case AND:
4947 /* If we are AND'ing with a large constant that is only a single
4948 bit and the result is only being used in a context where we
4949 need to know if it is zero or nonzero, replace it with a bit
4950 extraction. This will avoid the large constant, which might
4951 have taken more than one insn to make. If the constant were
4952 not a valid argument to the AND but took only one insn to make,
4953 this is no worse, but if it took more than one insn, it will
4954 be better. */
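/* For instance, if (set (reg D) (and (reg X) (const_int 128))) is used
   only by (ne (reg D) (const_int 0)), the AND can typically be replaced
   by (zero_extract (reg X) (const_int 1) (const_int 7)).  */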
4955
4956 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4957 && REG_P (XEXP (SET_SRC (x), 0))
4958 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4959 && REG_P (SET_DEST (x))
4960 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
4961 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4962 && XEXP (*split, 0) == SET_DEST (x)
4963 && XEXP (*split, 1) == const0_rtx)
4964 {
4965 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4966 XEXP (SET_SRC (x), 0),
4967 pos, NULL_RTX, 1, 1, 0, 0);
4968 if (extraction != 0)
4969 {
4970 SUBST (SET_SRC (x), extraction);
4971 return find_split_point (loc, insn, false);
4972 }
4973 }
4974 break;
4975
4976 case NE:
4977 /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit of X
4978 is known to be on, this can be converted into a NEG of a shift. */
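/* For example, with STORE_FLAG_VALUE of -1, if only bit 2 of X can be
   nonzero, (ne X (const_int 0)) becomes (neg (lshiftrt X (const_int 2))),
   which yields 0 or -1 as required.  */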
4979 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4980 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4981 && 1 <= (pos = exact_log2
4982 (nonzero_bits (XEXP (SET_SRC (x), 0),
4983 GET_MODE (XEXP (SET_SRC (x), 0))))))
4984 {
4985 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4986
4987 SUBST (SET_SRC (x),
4988 gen_rtx_NEG (mode,
4989 gen_rtx_LSHIFTRT (mode,
4990 XEXP (SET_SRC (x), 0),
4991 GEN_INT (pos))));
4992
4993 split = find_split_point (&SET_SRC (x), insn, true);
4994 if (split && split != &SET_SRC (x))
4995 return split;
4996 }
4997 break;
4998
4999 case SIGN_EXTEND:
5000 inner = XEXP (SET_SRC (x), 0);
5001
5002 /* We can't optimize if either mode is a partial integer
5003 mode as we don't know how many bits are significant
5004 in those modes. */
5005 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
5006 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5007 break;
5008
5009 pos = 0;
5010 len = GET_MODE_PRECISION (GET_MODE (inner));
5011 unsignedp = 0;
5012 break;
5013
5014 case SIGN_EXTRACT:
5015 case ZERO_EXTRACT:
5016 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5017 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5018 {
5019 inner = XEXP (SET_SRC (x), 0);
5020 len = INTVAL (XEXP (SET_SRC (x), 1));
5021 pos = INTVAL (XEXP (SET_SRC (x), 2));
5022
5023 if (BITS_BIG_ENDIAN)
5024 pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
5025 unsignedp = (code == ZERO_EXTRACT);
5026 }
5027 break;
5028
5029 default:
5030 break;
5031 }
5032
5033 if (len && pos >= 0
5034 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
5035 {
5036 machine_mode mode = GET_MODE (SET_SRC (x));
5037
5038 /* For unsigned, we have a choice of a shift followed by an
5039 AND or two shifts. Use two shifts for field sizes where the
5040 constant might be too large. We assume here that we can
5041 always at least get 8-bit constants in an AND insn, which is
5042 true for every current RISC. */
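/* For example, with a 32-bit mode and BITS_BIG_ENDIAN clear, an unsigned
   4-bit field at bit 8 becomes
   (and (lshiftrt (reg R) (const_int 8)) (const_int 15)),
   while a signed 8-bit field at bit 8 becomes
   (ashiftrt (ashift (reg R) (const_int 16)) (const_int 24)).  */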
5043
5044 if (unsignedp && len <= 8)
5045 {
5046 unsigned HOST_WIDE_INT mask
5047 = (HOST_WIDE_INT_1U << len) - 1;
5048 SUBST (SET_SRC (x),
5049 gen_rtx_AND (mode,
5050 gen_rtx_LSHIFTRT
5051 (mode, gen_lowpart (mode, inner),
5052 GEN_INT (pos)),
5053 gen_int_mode (mask, mode)));
5054
5055 split = find_split_point (&SET_SRC (x), insn, true);
5056 if (split && split != &SET_SRC (x))
5057 return split;
5058 }
5059 else
5060 {
5061 SUBST (SET_SRC (x),
5062 gen_rtx_fmt_ee
5063 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5064 gen_rtx_ASHIFT (mode,
5065 gen_lowpart (mode, inner),
5066 GEN_INT (GET_MODE_PRECISION (mode)
5067 - len - pos)),
5068 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5069
5070 split = find_split_point (&SET_SRC (x), insn, true);
5071 if (split && split != &SET_SRC (x))
5072 return split;
5073 }
5074 }
5075
5076 /* See if this is a simple operation with a constant as the second
5077 operand. It might be that this constant is out of range and hence
5078 could be used as a split point. */
5079 if (BINARY_P (SET_SRC (x))
5080 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5081 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5082 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5083 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5084 return &XEXP (SET_SRC (x), 1);
5085
5086 /* Finally, see if this is a simple operation with its first operand
5087 not in a register. The operation might require this operand in a
5088 register, so return it as a split point. We can always do this
5089 because if the first operand were another operation, we would have
5090 already found it as a split point. */
5091 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5092 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5093 return &XEXP (SET_SRC (x), 0);
5094
5095 return 0;
5096
5097 case AND:
5098 case IOR:
5099 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5100 it is better to write this as (not (ior A B)) so we can split it.
5101 Similarly for IOR. */
5102 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5103 {
5104 SUBST (*loc,
5105 gen_rtx_NOT (GET_MODE (x),
5106 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5107 GET_MODE (x),
5108 XEXP (XEXP (x, 0), 0),
5109 XEXP (XEXP (x, 1), 0))));
5110 return find_split_point (loc, insn, set_src);
5111 }
5112
5113 /* Many RISC machines have a large set of logical insns. If the
5114 second operand is a NOT, put it first so we will try to split the
5115 other operand first. */
5116 if (GET_CODE (XEXP (x, 1)) == NOT)
5117 {
5118 rtx tem = XEXP (x, 0);
5119 SUBST (XEXP (x, 0), XEXP (x, 1));
5120 SUBST (XEXP (x, 1), tem);
5121 }
5122 break;
5123
5124 case PLUS:
5125 case MINUS:
5126 /* Canonicalization can produce (minus A (mult B C)), where C is a
5127 constant. It may be better to try splitting (plus (mult B -C) A)
5128 instead if this isn't a multiply by a power of two. */
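/* For example, (minus (reg A) (mult (reg B) (const_int 3))) is rewritten
   as (plus (mult (reg B) (const_int -3)) (reg A)) before looking for a
   split point.  */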
5129 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5130 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5131 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5132 {
5133 machine_mode mode = GET_MODE (x);
5134 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5135 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5136 SUBST (*loc, gen_rtx_PLUS (mode,
5137 gen_rtx_MULT (mode,
5138 XEXP (XEXP (x, 1), 0),
5139 gen_int_mode (other_int,
5140 mode)),
5141 XEXP (x, 0)));
5142 return find_split_point (loc, insn, set_src);
5143 }
5144
5145 /* Split at a multiply-accumulate instruction. However if this is
5146 the SET_SRC, we likely do not have such an instruction and it's
5147 worthless to try this split. */
5148 if (!set_src
5149 && (GET_CODE (XEXP (x, 0)) == MULT
5150 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5151 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5152 return loc;
5153
5154 default:
5155 break;
5156 }
5157
5158 /* Otherwise, select our actions depending on our rtx class. */
5159 switch (GET_RTX_CLASS (code))
5160 {
5161 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5162 case RTX_TERNARY:
5163 split = find_split_point (&XEXP (x, 2), insn, false);
5164 if (split)
5165 return split;
5166 /* fall through */
5167 case RTX_BIN_ARITH:
5168 case RTX_COMM_ARITH:
5169 case RTX_COMPARE:
5170 case RTX_COMM_COMPARE:
5171 split = find_split_point (&XEXP (x, 1), insn, false);
5172 if (split)
5173 return split;
5174 /* fall through */
5175 case RTX_UNARY:
5176 /* Some machines have (and (shift ...) ...) insns. If X is not
5177 an AND, but XEXP (X, 0) is, use it as our split point. */
5178 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5179 return &XEXP (x, 0);
5180
5181 split = find_split_point (&XEXP (x, 0), insn, false);
5182 if (split)
5183 return split;
5184 return loc;
5185
5186 default:
5187 /* Otherwise, we don't have a split point. */
5188 return 0;
5189 }
5190 }
5191 \f
5192 /* Throughout X, replace FROM with TO, and return the result.
5193 The result is TO if X is FROM;
5194 otherwise the result is X, but its contents may have been modified.
5195 If they were modified, a record was made in undobuf so that
5196 undo_all will (among other things) return X to its original state.
5197
5198 If the number of changes necessary is too much to record to undo,
5199 the excess changes are not made, so the result is invalid.
5200 The changes already made can still be undone.
5201 undobuf.num_undo is incremented for such changes, so by testing that,
5202 the caller can tell whether the result is valid.
5203
5204 `n_occurrences' is incremented each time FROM is replaced.
5205
5206 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5207
5208 IN_COND is nonzero if we are at the top level of a condition.
5209
5210 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5211 by copying if `n_occurrences' is nonzero. */
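/* For example (with illustrative register numbers), substituting
   (plus (reg 101) (const_int 4)) for (reg 100) in
   (set (reg 102) (mult (reg 100) (reg 103))) gives
   (set (reg 102) (mult (plus (reg 101) (const_int 4)) (reg 103))).  */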
5212
5213 static rtx
5214 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5215 {
5216 enum rtx_code code = GET_CODE (x);
5217 machine_mode op0_mode = VOIDmode;
5218 const char *fmt;
5219 int len, i;
5220 rtx new_rtx;
5221
5222 /* Two expressions are equal if they are identical copies of a shared
5223 RTX or if they are both registers with the same register number
5224 and mode. */
5225
5226 #define COMBINE_RTX_EQUAL_P(X,Y) \
5227 ((X) == (Y) \
5228 || (REG_P (X) && REG_P (Y) \
5229 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5230
5231 /* Do not substitute into clobbers of regs -- this will never result in
5232 valid RTL. */
5233 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5234 return x;
5235
5236 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5237 {
5238 n_occurrences++;
5239 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5240 }
5241
5242 /* If X and FROM are the same register but different modes, they
5243 will not have been seen as equal above. However, the log links code
5244 will make a LOG_LINKS entry for that case. If we do nothing, we
5245 will try to rerecognize our original insn and, when it succeeds,
5246 we will delete the feeding insn, which is incorrect.
5247
5248 So force this insn not to match in this (rare) case. */
5249 if (! in_dest && code == REG && REG_P (from)
5250 && reg_overlap_mentioned_p (x, from))
5251 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5252
5253 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5254 of which may contain things that can be combined. */
5255 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5256 return x;
5257
5258 /* It is possible to have a subexpression appear twice in the insn.
5259 Suppose that FROM is a register that appears within TO.
5260 Then, after that subexpression has been scanned once by `subst',
5261 the second time it is scanned, TO may be found. If we were
5262 to scan TO here, we would find FROM within it and create a
5263 self-referent rtl structure which is completely wrong. */
5264 if (COMBINE_RTX_EQUAL_P (x, to))
5265 return to;
5266
5267 /* Parallel asm_operands need special attention because all of the
5268 inputs are shared across the arms. Furthermore, unsharing the
5269 rtl results in recognition failures. Failure to handle this case
5270 specially can result in circular rtl.
5271
5272 Solve this by doing a normal pass across the first entry of the
5273 parallel, and only processing the SET_DESTs of the subsequent
5274 entries. Ug. */
5275
5276 if (code == PARALLEL
5277 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5278 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5279 {
5280 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5281
5282 /* If this substitution failed, this whole thing fails. */
5283 if (GET_CODE (new_rtx) == CLOBBER
5284 && XEXP (new_rtx, 0) == const0_rtx)
5285 return new_rtx;
5286
5287 SUBST (XVECEXP (x, 0, 0), new_rtx);
5288
5289 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5290 {
5291 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5292
5293 if (!REG_P (dest)
5294 && GET_CODE (dest) != CC0
5295 && GET_CODE (dest) != PC)
5296 {
5297 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5298
5299 /* If this substitution failed, this whole thing fails. */
5300 if (GET_CODE (new_rtx) == CLOBBER
5301 && XEXP (new_rtx, 0) == const0_rtx)
5302 return new_rtx;
5303
5304 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5305 }
5306 }
5307 }
5308 else
5309 {
5310 len = GET_RTX_LENGTH (code);
5311 fmt = GET_RTX_FORMAT (code);
5312
5313 /* We don't need to process a SET_DEST that is a register, CC0,
5314 or PC, so set up to skip this common case. All other cases
5315 where we want to suppress replacing something inside a
5316 SET_SRC are handled via the IN_DEST operand. */
5317 if (code == SET
5318 && (REG_P (SET_DEST (x))
5319 || GET_CODE (SET_DEST (x)) == CC0
5320 || GET_CODE (SET_DEST (x)) == PC))
5321 fmt = "ie";
5322
5323 /* Trying to simplify the operands of a widening MULT is not likely
5324 to create RTL matching a machine insn. */
5325 if (code == MULT
5326 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5327 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5328 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5329 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5330 && REG_P (XEXP (XEXP (x, 0), 0))
5331 && REG_P (XEXP (XEXP (x, 1), 0))
5332 && from == to)
5333 return x;
5334
5335
5336 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5337 constant. */
5338 if (fmt[0] == 'e')
5339 op0_mode = GET_MODE (XEXP (x, 0));
5340
5341 for (i = 0; i < len; i++)
5342 {
5343 if (fmt[i] == 'E')
5344 {
5345 int j;
5346 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5347 {
5348 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5349 {
5350 new_rtx = (unique_copy && n_occurrences
5351 ? copy_rtx (to) : to);
5352 n_occurrences++;
5353 }
5354 else
5355 {
5356 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5357 unique_copy);
5358
5359 /* If this substitution failed, this whole thing
5360 fails. */
5361 if (GET_CODE (new_rtx) == CLOBBER
5362 && XEXP (new_rtx, 0) == const0_rtx)
5363 return new_rtx;
5364 }
5365
5366 SUBST (XVECEXP (x, i, j), new_rtx);
5367 }
5368 }
5369 else if (fmt[i] == 'e')
5370 {
5371 /* If this is a register being set, ignore it. */
5372 new_rtx = XEXP (x, i);
5373 if (in_dest
5374 && i == 0
5375 && (((code == SUBREG || code == ZERO_EXTRACT)
5376 && REG_P (new_rtx))
5377 || code == STRICT_LOW_PART))
5378 ;
5379
5380 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5381 {
5382 /* In general, don't install a subreg involving two
5383 modes not tieable. It can worsen register
5384 allocation, and can even make invalid reload
5385 insns, since the reg inside may need to be copied
5386 from in the outside mode, and that may be invalid
5387 if it is an fp reg copied in integer mode.
5388
5389 We allow two exceptions to this: It is valid if
5390 it is inside another SUBREG and the mode of that
5391 SUBREG and the mode of the inside of TO is
5392 tieable and it is valid if X is a SET that copies
5393 FROM to CC0. */
5394
5395 if (GET_CODE (to) == SUBREG
5396 && ! MODES_TIEABLE_P (GET_MODE (to),
5397 GET_MODE (SUBREG_REG (to)))
5398 && ! (code == SUBREG
5399 && MODES_TIEABLE_P (GET_MODE (x),
5400 GET_MODE (SUBREG_REG (to))))
5401 && (!HAVE_cc0
5402 || (! (code == SET
5403 && i == 1
5404 && XEXP (x, 0) == cc0_rtx))))
5405 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5406
5407 if (code == SUBREG
5408 && REG_P (to)
5409 && REGNO (to) < FIRST_PSEUDO_REGISTER
5410 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5411 SUBREG_BYTE (x),
5412 GET_MODE (x)) < 0)
5413 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5414
5415 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5416 n_occurrences++;
5417 }
5418 else
5419 /* If we are in a SET_DEST, suppress most cases unless we
5420 have gone inside a MEM, in which case we want to
5421 simplify the address. We assume here that things that
5422 are actually part of the destination have their inner
5423 parts in the first expression. This is true for SUBREG,
5424 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5425 things aside from REG and MEM that should appear in a
5426 SET_DEST. */
5427 new_rtx = subst (XEXP (x, i), from, to,
5428 (((in_dest
5429 && (code == SUBREG || code == STRICT_LOW_PART
5430 || code == ZERO_EXTRACT))
5431 || code == SET)
5432 && i == 0),
5433 code == IF_THEN_ELSE && i == 0,
5434 unique_copy);
5435
5436 /* If we found that we will have to reject this combination,
5437 indicate that by returning the CLOBBER ourselves, rather than
5438 an expression containing it. This will speed things up as
5439 well as prevent accidents where two CLOBBERs are considered
5440 to be equal, thus producing an incorrect simplification. */
5441
5442 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5443 return new_rtx;
5444
5445 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5446 {
5447 machine_mode mode = GET_MODE (x);
5448
5449 x = simplify_subreg (GET_MODE (x), new_rtx,
5450 GET_MODE (SUBREG_REG (x)),
5451 SUBREG_BYTE (x));
5452 if (! x)
5453 x = gen_rtx_CLOBBER (mode, const0_rtx);
5454 }
5455 else if (CONST_SCALAR_INT_P (new_rtx)
5456 && GET_CODE (x) == ZERO_EXTEND)
5457 {
5458 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5459 new_rtx, GET_MODE (XEXP (x, 0)));
5460 gcc_assert (x);
5461 }
5462 else
5463 SUBST (XEXP (x, i), new_rtx);
5464 }
5465 }
5466 }
5467
5468 /* Check if we are loading something from the constant pool via float
5469 extension; in this case we would undo the compress_float_constant
5470 optimization and degenerate the constant load to an immediate value. */
5471 if (GET_CODE (x) == FLOAT_EXTEND
5472 && MEM_P (XEXP (x, 0))
5473 && MEM_READONLY_P (XEXP (x, 0)))
5474 {
5475 rtx tmp = avoid_constant_pool_reference (x);
5476 if (x != tmp)
5477 return x;
5478 }
5479
5480 /* Try to simplify X. If the simplification changed the code, it is likely
5481 that further simplification will help, so loop, but limit the number
5482 of repetitions that will be performed. */
5483
5484 for (i = 0; i < 4; i++)
5485 {
5486 /* If X is sufficiently simple, don't bother trying to do anything
5487 with it. */
5488 if (code != CONST_INT && code != REG && code != CLOBBER)
5489 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5490
5491 if (GET_CODE (x) == code)
5492 break;
5493
5494 code = GET_CODE (x);
5495
5496 /* We no longer know the original mode of operand 0 since we
5497 have changed the form of X. */
5498 op0_mode = VOIDmode;
5499 }
5500
5501 return x;
5502 }
5503 \f
5504 /* If X is a commutative operation whose operands are not in the canonical
5505 order, use substitutions to swap them. */
5506
5507 static void
5508 maybe_swap_commutative_operands (rtx x)
5509 {
5510 if (COMMUTATIVE_ARITH_P (x)
5511 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5512 {
5513 rtx temp = XEXP (x, 0);
5514 SUBST (XEXP (x, 0), XEXP (x, 1));
5515 SUBST (XEXP (x, 1), temp);
5516 }
5517 }
5518
5519 /* Simplify X, a piece of RTL. We just operate on the expression at the
5520 outer level; call `subst' to simplify recursively. Return the new
5521 expression.
5522
5523 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5524 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5525 of a condition. */
5526
5527 static rtx
5528 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5529 int in_cond)
5530 {
5531 enum rtx_code code = GET_CODE (x);
5532 machine_mode mode = GET_MODE (x);
5533 rtx temp;
5534 int i;
5535
5536 /* If this is a commutative operation, put a constant last and a complex
5537 expression first. We don't need to do this for comparisons here. */
5538 maybe_swap_commutative_operands (x);
5539
5540 /* Try to fold this expression in case we have constants that weren't
5541 present before. */
5542 temp = 0;
5543 switch (GET_RTX_CLASS (code))
5544 {
5545 case RTX_UNARY:
5546 if (op0_mode == VOIDmode)
5547 op0_mode = GET_MODE (XEXP (x, 0));
5548 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5549 break;
5550 case RTX_COMPARE:
5551 case RTX_COMM_COMPARE:
5552 {
5553 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5554 if (cmp_mode == VOIDmode)
5555 {
5556 cmp_mode = GET_MODE (XEXP (x, 1));
5557 if (cmp_mode == VOIDmode)
5558 cmp_mode = op0_mode;
5559 }
5560 temp = simplify_relational_operation (code, mode, cmp_mode,
5561 XEXP (x, 0), XEXP (x, 1));
5562 }
5563 break;
5564 case RTX_COMM_ARITH:
5565 case RTX_BIN_ARITH:
5566 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5567 break;
5568 case RTX_BITFIELD_OPS:
5569 case RTX_TERNARY:
5570 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5571 XEXP (x, 1), XEXP (x, 2));
5572 break;
5573 default:
5574 break;
5575 }
5576
5577 if (temp)
5578 {
5579 x = temp;
5580 code = GET_CODE (temp);
5581 op0_mode = VOIDmode;
5582 mode = GET_MODE (temp);
5583 }
5584
5585 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5586 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5587 things. Check for cases where both arms are testing the same
5588 condition.
5589
5590 Don't do anything if all operands are very simple. */
5591
5592 if ((BINARY_P (x)
5593 && ((!OBJECT_P (XEXP (x, 0))
5594 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5595 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5596 || (!OBJECT_P (XEXP (x, 1))
5597 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5598 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5599 || (UNARY_P (x)
5600 && (!OBJECT_P (XEXP (x, 0))
5601 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5602 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5603 {
5604 rtx cond, true_rtx, false_rtx;
5605 unsigned HOST_WIDE_INT nz;
5606
5607 /* If the operation is an AND wrapped in a SIGN_EXTEND or ZERO_EXTEND with
5608 either operand being just a constant single bit value, do nothing since
5609 IF_THEN_ELSE is likely to increase the expression's complexity. */
5610 if (HWI_COMPUTABLE_MODE_P (mode)
5611 && pow2p_hwi (nz = nonzero_bits (x, mode))
5612 && ! ((code == SIGN_EXTEND || code == ZERO_EXTEND)
5613 && GET_CODE (XEXP (x, 0)) == AND
5614 && CONST_INT_P (XEXP (XEXP (x, 0), 0))
5615 && UINTVAL (XEXP (XEXP (x, 0), 0)) == nz))
5616 return x;
5617
5618 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5619 if (cond != 0
5620 /* If everything is a comparison, what we have is highly unlikely
5621 to be simpler, so don't use it. */
5622 && ! (COMPARISON_P (x)
5623 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5624 {
5625 rtx cop1 = const0_rtx;
5626 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5627
5628 if (cond_code == NE && COMPARISON_P (cond))
5629 return x;
5630
5631 /* Simplify the alternative arms; this may collapse the true and
5632 false arms to store-flag values. Be careful to use copy_rtx
5633 here since true_rtx or false_rtx might share RTL with x as a
5634 result of the if_then_else_cond call above. */
5635 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5636 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5637
5638 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5639 is unlikely to be simpler. */
5640 if (general_operand (true_rtx, VOIDmode)
5641 && general_operand (false_rtx, VOIDmode))
5642 {
5643 enum rtx_code reversed;
5644
5645 /* Restarting if we generate a store-flag expression will cause
5646 us to loop. Just drop through in this case. */
5647
5648 /* If the result values are STORE_FLAG_VALUE and zero, we can
5649 just make the comparison operation. */
5650 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5651 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5652 cond, cop1);
5653 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5654 && ((reversed = reversed_comparison_code_parts
5655 (cond_code, cond, cop1, NULL))
5656 != UNKNOWN))
5657 x = simplify_gen_relational (reversed, mode, VOIDmode,
5658 cond, cop1);
5659
5660 /* Likewise, we can make the negate of a comparison operation
5661 if the result values are - STORE_FLAG_VALUE and zero. */
5662 else if (CONST_INT_P (true_rtx)
5663 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5664 && false_rtx == const0_rtx)
5665 x = simplify_gen_unary (NEG, mode,
5666 simplify_gen_relational (cond_code,
5667 mode, VOIDmode,
5668 cond, cop1),
5669 mode);
5670 else if (CONST_INT_P (false_rtx)
5671 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5672 && true_rtx == const0_rtx
5673 && ((reversed = reversed_comparison_code_parts
5674 (cond_code, cond, cop1, NULL))
5675 != UNKNOWN))
5676 x = simplify_gen_unary (NEG, mode,
5677 simplify_gen_relational (reversed,
5678 mode, VOIDmode,
5679 cond, cop1),
5680 mode);
5681 else
5682 return gen_rtx_IF_THEN_ELSE (mode,
5683 simplify_gen_relational (cond_code,
5684 mode,
5685 VOIDmode,
5686 cond,
5687 cop1),
5688 true_rtx, false_rtx);
5689
5690 code = GET_CODE (x);
5691 op0_mode = VOIDmode;
5692 }
5693 }
5694 }
5695
5696 /* First see if we can apply the inverse distributive law. */
5697 if (code == PLUS || code == MINUS
5698 || code == AND || code == IOR || code == XOR)
5699 {
5700 x = apply_distributive_law (x);
5701 code = GET_CODE (x);
5702 op0_mode = VOIDmode;
5703 }
5704
5705 /* If CODE is an associative operation not otherwise handled, see if we
5706 can associate some operands. This can win if they are constants or
5707 if they are logically related (i.e. (a & b) & a). */
5708 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5709 || code == AND || code == IOR || code == XOR
5710 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5711 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5712 || (flag_associative_math && FLOAT_MODE_P (mode))))
5713 {
5714 if (GET_CODE (XEXP (x, 0)) == code)
5715 {
5716 rtx other = XEXP (XEXP (x, 0), 0);
5717 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5718 rtx inner_op1 = XEXP (x, 1);
5719 rtx inner;
5720
5721 /* Make sure we pass the constant operand if any as the second
5722 one if this is a commutative operation. */
5723 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5724 std::swap (inner_op0, inner_op1);
5725 inner = simplify_binary_operation (code == MINUS ? PLUS
5726 : code == DIV ? MULT
5727 : code,
5728 mode, inner_op0, inner_op1);
5729
5730 /* For commutative operations, try the other pair if that one
5731 didn't simplify. */
5732 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5733 {
5734 other = XEXP (XEXP (x, 0), 1);
5735 inner = simplify_binary_operation (code, mode,
5736 XEXP (XEXP (x, 0), 0),
5737 XEXP (x, 1));
5738 }
5739
5740 if (inner)
5741 return simplify_gen_binary (code, mode, other, inner);
5742 }
5743 }
5744
5745 /* A little bit of algebraic simplification here. */
5746 switch (code)
5747 {
5748 case MEM:
5749 /* Ensure that our address has any ASHIFTs converted to MULT in case
5750 address-recognizing predicates are called later. */
5751 temp = make_compound_operation (XEXP (x, 0), MEM);
5752 SUBST (XEXP (x, 0), temp);
5753 break;
5754
5755 case SUBREG:
5756 if (op0_mode == VOIDmode)
5757 op0_mode = GET_MODE (SUBREG_REG (x));
5758
5759 /* See if this can be moved to simplify_subreg. */
5760 if (CONSTANT_P (SUBREG_REG (x))
5761 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5762 /* Don't call gen_lowpart if the inner mode
5763 is VOIDmode and we cannot simplify it, as SUBREG without
5764 inner mode is invalid. */
5765 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5766 || gen_lowpart_common (mode, SUBREG_REG (x))))
5767 return gen_lowpart (mode, SUBREG_REG (x));
5768
5769 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5770 break;
5771 {
5772 rtx temp;
5773 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5774 SUBREG_BYTE (x));
5775 if (temp)
5776 return temp;
5777
5778 /* If op is known to have all lower bits zero, the result is zero. */
5779 if (!in_dest
5780 && SCALAR_INT_MODE_P (mode)
5781 && SCALAR_INT_MODE_P (op0_mode)
5782 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
5783 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5784 && HWI_COMPUTABLE_MODE_P (op0_mode)
5785 && (nonzero_bits (SUBREG_REG (x), op0_mode)
5786 & GET_MODE_MASK (mode)) == 0)
5787 return CONST0_RTX (mode);
5788 }
5789
5790 /* Don't change the mode of the MEM if that would change the meaning
5791 of the address. */
5792 if (MEM_P (SUBREG_REG (x))
5793 && (MEM_VOLATILE_P (SUBREG_REG (x))
5794 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5795 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5796 return gen_rtx_CLOBBER (mode, const0_rtx);
5797
5798 /* Note that we cannot do any narrowing for non-constants since
5799 we might have been counting on using the fact that some bits were
5800 zero. We now do this in the SET. */
5801
5802 break;
5803
5804 case NEG:
5805 temp = expand_compound_operation (XEXP (x, 0));
5806
5807 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5808 replaced by (lshiftrt X C). This will convert
5809 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5810
5811 if (GET_CODE (temp) == ASHIFTRT
5812 && CONST_INT_P (XEXP (temp, 1))
5813 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5814 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5815 INTVAL (XEXP (temp, 1)));
5816
5817 /* If X has only a single bit that might be nonzero, say, bit I, convert
5818 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5819 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5820 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5821 or a SUBREG of one since we'd be making the expression more
5822 complex if it was just a register. */
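/* E.g. in SImode, if only bit 2 of TEMP can be nonzero, (neg TEMP) becomes
   (ashiftrt (ashift TEMP (const_int 29)) (const_int 29)).  */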
5823
5824 if (!REG_P (temp)
5825 && ! (GET_CODE (temp) == SUBREG
5826 && REG_P (SUBREG_REG (temp)))
5827 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5828 {
5829 rtx temp1 = simplify_shift_const
5830 (NULL_RTX, ASHIFTRT, mode,
5831 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5832 GET_MODE_PRECISION (mode) - 1 - i),
5833 GET_MODE_PRECISION (mode) - 1 - i);
5834
5835 /* If all we did was surround TEMP with the two shifts, we
5836 haven't improved anything, so don't use it. Otherwise,
5837 we are better off with TEMP1. */
5838 if (GET_CODE (temp1) != ASHIFTRT
5839 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5840 || XEXP (XEXP (temp1, 0), 0) != temp)
5841 return temp1;
5842 }
5843 break;
5844
5845 case TRUNCATE:
5846 /* We can't handle truncation to a partial integer mode here
5847 because we don't know the real bitsize of the partial
5848 integer mode. */
5849 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5850 break;
5851
5852 if (HWI_COMPUTABLE_MODE_P (mode))
5853 SUBST (XEXP (x, 0),
5854 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5855 GET_MODE_MASK (mode), 0));
5856
5857 /* We can truncate a constant value and return it. */
5858 if (CONST_INT_P (XEXP (x, 0)))
5859 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5860
5861 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5862 whose value is a comparison can be replaced with a subreg if
5863 STORE_FLAG_VALUE permits. */
5864 if (HWI_COMPUTABLE_MODE_P (mode)
5865 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5866 && (temp = get_last_value (XEXP (x, 0)))
5867 && COMPARISON_P (temp))
5868 return gen_lowpart (mode, XEXP (x, 0));
5869 break;
5870
5871 case CONST:
5872 /* (const (const X)) can become (const X). Do it this way rather than
5873 returning the inner CONST since CONST can be shared with a
5874 REG_EQUAL note. */
5875 if (GET_CODE (XEXP (x, 0)) == CONST)
5876 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5877 break;
5878
5879 case LO_SUM:
5880 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5881 can add in an offset. find_split_point will split this address up
5882 again if it doesn't match. */
5883 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5884 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5885 return XEXP (x, 1);
5886 break;
5887
5888 case PLUS:
5889 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5890 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5891 bit-field and can be replaced by either a sign_extend or a
5892 sign_extract. The `and' may be a zero_extend and the two
5893 <c>, -<c> constants may be reversed. */
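/* Concretely, in SImode
   (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128))
   sign-extends the low 8 bits of X and becomes
   (ashiftrt (ashift X (const_int 24)) (const_int 24)).  */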
5894 if (GET_CODE (XEXP (x, 0)) == XOR
5895 && CONST_INT_P (XEXP (x, 1))
5896 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5897 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5898 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5899 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5900 && HWI_COMPUTABLE_MODE_P (mode)
5901 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5902 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5903 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5904 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5905 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5906 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5907 == (unsigned int) i + 1))))
5908 return simplify_shift_const
5909 (NULL_RTX, ASHIFTRT, mode,
5910 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5911 XEXP (XEXP (XEXP (x, 0), 0), 0),
5912 GET_MODE_PRECISION (mode) - (i + 1)),
5913 GET_MODE_PRECISION (mode) - (i + 1));
5914
5915 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5916 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5917 the bitsize of the mode - 1. This allows simplification of
5918 "a = (b & 8) == 0;" */
5919 if (XEXP (x, 1) == constm1_rtx
5920 && !REG_P (XEXP (x, 0))
5921 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5922 && REG_P (SUBREG_REG (XEXP (x, 0))))
5923 && nonzero_bits (XEXP (x, 0), mode) == 1)
5924 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5925 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5926 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5927 GET_MODE_PRECISION (mode) - 1),
5928 GET_MODE_PRECISION (mode) - 1);
5929
5930 /* If we are adding two things that have no bits in common, convert
5931 the addition into an IOR. This will often be further simplified,
5932 for example in cases like ((a & 1) + (a & 2)), which can
5933 become a & 3. */
5934
5935 if (HWI_COMPUTABLE_MODE_P (mode)
5936 && (nonzero_bits (XEXP (x, 0), mode)
5937 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5938 {
5939 /* Try to simplify the expression further. */
5940 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5941 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5942
5943 /* If we could, great. If not, do not go ahead with the IOR
5944 replacement, since PLUS appears in many special purpose
5945 address arithmetic instructions. */
5946 if (GET_CODE (temp) != CLOBBER
5947 && (GET_CODE (temp) != IOR
5948 || ((XEXP (temp, 0) != XEXP (x, 0)
5949 || XEXP (temp, 1) != XEXP (x, 1))
5950 && (XEXP (temp, 0) != XEXP (x, 1)
5951 || XEXP (temp, 1) != XEXP (x, 0)))))
5952 return temp;
5953 }
5954
5955 /* Canonicalize x + x into x << 1. */
5956 if (GET_MODE_CLASS (mode) == MODE_INT
5957 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
5958 && !side_effects_p (XEXP (x, 0)))
5959 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
5960
5961 break;
5962
5963 case MINUS:
5964 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5965 (and <foo> (const_int pow2-1)) */
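/* E.g. (minus X (and X (const_int -8))) becomes (and X (const_int 7)),
   i.e. it keeps only the low three bits of X.  */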
5966 if (GET_CODE (XEXP (x, 1)) == AND
5967 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5968 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
5969 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5970 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5971 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5972 break;
5973
5974 case MULT:
5975 /* If we have (mult (plus A B) C), apply the distributive law and then
5976 the inverse distributive law to see if things simplify. This
5977 occurs mostly in addresses, often when unrolling loops. */
5978
5979 if (GET_CODE (XEXP (x, 0)) == PLUS)
5980 {
5981 rtx result = distribute_and_simplify_rtx (x, 0);
5982 if (result)
5983 return result;
5984 }
5985
5986 /* Try simplify a*(b/c) as (a*b)/c. */
5987 if (FLOAT_MODE_P (mode) && flag_associative_math
5988 && GET_CODE (XEXP (x, 0)) == DIV)
5989 {
5990 rtx tem = simplify_binary_operation (MULT, mode,
5991 XEXP (XEXP (x, 0), 0),
5992 XEXP (x, 1));
5993 if (tem)
5994 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5995 }
5996 break;
5997
5998 case UDIV:
5999 /* If this is a divide by a power of two, treat it as a shift if
6000 its first operand is a shift. */
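/* E.g. (udiv (lshiftrt X (const_int 2)) (const_int 4)) is handled as a
   further logical shift right by 2, which normally folds into
   (lshiftrt X (const_int 4)).  */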
6001 if (CONST_INT_P (XEXP (x, 1))
6002 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6003 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6004 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6005 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6006 || GET_CODE (XEXP (x, 0)) == ROTATE
6007 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6008 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
6009 break;
6010
6011 case EQ: case NE:
6012 case GT: case GTU: case GE: case GEU:
6013 case LT: case LTU: case LE: case LEU:
6014 case UNEQ: case LTGT:
6015 case UNGT: case UNGE:
6016 case UNLT: case UNLE:
6017 case UNORDERED: case ORDERED:
6018 /* If the first operand is a condition code, we can't do anything
6019 with it. */
6020 if (GET_CODE (XEXP (x, 0)) == COMPARE
6021 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6022 && ! CC0_P (XEXP (x, 0))))
6023 {
6024 rtx op0 = XEXP (x, 0);
6025 rtx op1 = XEXP (x, 1);
6026 enum rtx_code new_code;
6027
6028 if (GET_CODE (op0) == COMPARE)
6029 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6030
6031 /* Simplify our comparison, if possible. */
6032 new_code = simplify_comparison (code, &op0, &op1);
6033
6034 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6035 if only the low-order bit is possibly nonzero in X (such as when
6036 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6037 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6038 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6039 (plus X 1).
6040
6041 Remove any ZERO_EXTRACT we made when thinking this was a
6042 comparison. It may now be simpler to use, e.g., an AND. If a
6043 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6044 the call to make_compound_operation in the SET case.
6045
6046 Don't apply these optimizations if the caller would
6047 prefer a comparison rather than a value.
6048 E.g., for the condition in an IF_THEN_ELSE most targets need
6049 an explicit comparison. */
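/* For instance, with STORE_FLAG_VALUE of 1 and OP0 known to be 0 or 1 in
   MODE, (ne OP0 (const_int 0)) simplifies to just OP0 and
   (eq OP0 (const_int 0)) to (xor OP0 (const_int 1)).  */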
6050
6051 if (in_cond)
6052 ;
6053
6054 else if (STORE_FLAG_VALUE == 1
6055 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6056 && op1 == const0_rtx
6057 && mode == GET_MODE (op0)
6058 && nonzero_bits (op0, mode) == 1)
6059 return gen_lowpart (mode,
6060 expand_compound_operation (op0));
6061
6062 else if (STORE_FLAG_VALUE == 1
6063 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6064 && op1 == const0_rtx
6065 && mode == GET_MODE (op0)
6066 && (num_sign_bit_copies (op0, mode)
6067 == GET_MODE_PRECISION (mode)))
6068 {
6069 op0 = expand_compound_operation (op0);
6070 return simplify_gen_unary (NEG, mode,
6071 gen_lowpart (mode, op0),
6072 mode);
6073 }
6074
6075 else if (STORE_FLAG_VALUE == 1
6076 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6077 && op1 == const0_rtx
6078 && mode == GET_MODE (op0)
6079 && nonzero_bits (op0, mode) == 1)
6080 {
6081 op0 = expand_compound_operation (op0);
6082 return simplify_gen_binary (XOR, mode,
6083 gen_lowpart (mode, op0),
6084 const1_rtx);
6085 }
6086
6087 else if (STORE_FLAG_VALUE == 1
6088 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6089 && op1 == const0_rtx
6090 && mode == GET_MODE (op0)
6091 && (num_sign_bit_copies (op0, mode)
6092 == GET_MODE_PRECISION (mode)))
6093 {
6094 op0 = expand_compound_operation (op0);
6095 return plus_constant (mode, gen_lowpart (mode, op0), 1);
6096 }
6097
6098 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6099 those above. */
6100 if (in_cond)
6101 ;
6102
6103 else if (STORE_FLAG_VALUE == -1
6104 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6105 && op1 == const0_rtx
6106 && mode == GET_MODE (op0)
6107 && (num_sign_bit_copies (op0, mode)
6108 == GET_MODE_PRECISION (mode)))
6109 return gen_lowpart (mode,
6110 expand_compound_operation (op0));
6111
6112 else if (STORE_FLAG_VALUE == -1
6113 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6114 && op1 == const0_rtx
6115 && mode == GET_MODE (op0)
6116 && nonzero_bits (op0, mode) == 1)
6117 {
6118 op0 = expand_compound_operation (op0);
6119 return simplify_gen_unary (NEG, mode,
6120 gen_lowpart (mode, op0),
6121 mode);
6122 }
6123
6124 else if (STORE_FLAG_VALUE == -1
6125 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6126 && op1 == const0_rtx
6127 && mode == GET_MODE (op0)
6128 && (num_sign_bit_copies (op0, mode)
6129 == GET_MODE_PRECISION (mode)))
6130 {
6131 op0 = expand_compound_operation (op0);
6132 return simplify_gen_unary (NOT, mode,
6133 gen_lowpart (mode, op0),
6134 mode);
6135 }
6136
6137 /* If X is 0/1, (eq X 0) is X-1. */
6138 else if (STORE_FLAG_VALUE == -1
6139 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6140 && op1 == const0_rtx
6141 && mode == GET_MODE (op0)
6142 && nonzero_bits (op0, mode) == 1)
6143 {
6144 op0 = expand_compound_operation (op0);
6145 return plus_constant (mode, gen_lowpart (mode, op0), -1);
6146 }
6147
6148 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6149 one bit that might be nonzero, we can convert (ne x 0) to
6150 (ashift x c) where C puts the bit in the sign bit. Remove any
6151 AND with STORE_FLAG_VALUE when we are done, since we are only
6152 going to test the sign bit. */
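/* E.g. in SImode, if STORE_FLAG_VALUE is the sign bit and only bit 3 of X
   can be nonzero, (ne X (const_int 0)) becomes (ashift X (const_int 28)).  */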
6153 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6154 && HWI_COMPUTABLE_MODE_P (mode)
6155 && val_signbit_p (mode, STORE_FLAG_VALUE)
6156 && op1 == const0_rtx
6157 && mode == GET_MODE (op0)
6158 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
6159 {
6160 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6161 expand_compound_operation (op0),
6162 GET_MODE_PRECISION (mode) - 1 - i);
6163 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6164 return XEXP (x, 0);
6165 else
6166 return x;
6167 }
6168
6169 /* If the code changed, return a whole new comparison.
6170 We also need to avoid using SUBST in cases where
6171 simplify_comparison has widened a comparison with a CONST_INT,
6172 since in that case the wider CONST_INT may fail the sanity
6173 checks in do_SUBST. */
6174 if (new_code != code
6175 || (CONST_INT_P (op1)
6176 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6177 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6178 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6179
6180 /* Otherwise, keep this operation, but maybe change its operands.
6181 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6182 SUBST (XEXP (x, 0), op0);
6183 SUBST (XEXP (x, 1), op1);
6184 }
6185 break;
6186
6187 case IF_THEN_ELSE:
6188 return simplify_if_then_else (x);
6189
6190 case ZERO_EXTRACT:
6191 case SIGN_EXTRACT:
6192 case ZERO_EXTEND:
6193 case SIGN_EXTEND:
6194 /* If we are processing SET_DEST, we are done. */
6195 if (in_dest)
6196 return x;
6197
6198 return expand_compound_operation (x);
6199
6200 case SET:
6201 return simplify_set (x);
6202
6203 case AND:
6204 case IOR:
6205 return simplify_logical (x);
6206
6207 case ASHIFT:
6208 case LSHIFTRT:
6209 case ASHIFTRT:
6210 case ROTATE:
6211 case ROTATERT:
6212 /* If this is a shift by a constant amount, simplify it. */
6213 if (CONST_INT_P (XEXP (x, 1)))
6214 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6215 INTVAL (XEXP (x, 1)));
6216
6217 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6218 SUBST (XEXP (x, 1),
6219 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6220 (HOST_WIDE_INT_1U
6221 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6222 - 1,
6223 0));
6224 break;
6225
6226 default:
6227 break;
6228 }
6229
6230 return x;
6231 }
6232 \f
6233 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6234
6235 static rtx
6236 simplify_if_then_else (rtx x)
6237 {
6238 machine_mode mode = GET_MODE (x);
6239 rtx cond = XEXP (x, 0);
6240 rtx true_rtx = XEXP (x, 1);
6241 rtx false_rtx = XEXP (x, 2);
6242 enum rtx_code true_code = GET_CODE (cond);
6243 int comparison_p = COMPARISON_P (cond);
6244 rtx temp;
6245 int i;
6246 enum rtx_code false_code;
6247 rtx reversed;
6248
6249 /* Simplify storing of the truth value. */
6250 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6251 return simplify_gen_relational (true_code, mode, VOIDmode,
6252 XEXP (cond, 0), XEXP (cond, 1));
6253
6254 /* Also when the truth value has to be reversed. */
6255 if (comparison_p
6256 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6257 && (reversed = reversed_comparison (cond, mode)))
6258 return reversed;
6259
6260 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6261 in it is being compared against certain values. Get the true and false
6262 comparisons and see if that says anything about the value of each arm. */
6263
6264 if (comparison_p
6265 && ((false_code = reversed_comparison_code (cond, NULL))
6266 != UNKNOWN)
6267 && REG_P (XEXP (cond, 0)))
6268 {
6269 HOST_WIDE_INT nzb;
6270 rtx from = XEXP (cond, 0);
6271 rtx true_val = XEXP (cond, 1);
6272 rtx false_val = true_val;
6273 int swapped = 0;
6274
6275 /* If FALSE_CODE is EQ, swap the codes and arms. */
6276
6277 if (false_code == EQ)
6278 {
6279 swapped = 1, true_code = EQ, false_code = NE;
6280 std::swap (true_rtx, false_rtx);
6281 }
6282
6283 /* If we are comparing against zero and the expression being tested has
6284 only a single bit that might be nonzero, that is its value when it is
6285 not equal to zero. Similarly if it is known to be -1 or 0. */
6286
6287 if (true_code == EQ && true_val == const0_rtx
6288 && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from))))
6289 {
6290 false_code = EQ;
6291 false_val = gen_int_mode (nzb, GET_MODE (from));
6292 }
6293 else if (true_code == EQ && true_val == const0_rtx
6294 && (num_sign_bit_copies (from, GET_MODE (from))
6295 == GET_MODE_PRECISION (GET_MODE (from))))
6296 {
6297 false_code = EQ;
6298 false_val = constm1_rtx;
6299 }
6300
6301 /* Now simplify an arm if we know the value of the register in the
6302 branch and it is used in the arm. Be careful due to the potential
6303 of locally-shared RTL. */
6304
6305 if (reg_mentioned_p (from, true_rtx))
6306 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6307 from, true_val),
6308 pc_rtx, pc_rtx, 0, 0, 0);
6309 if (reg_mentioned_p (from, false_rtx))
6310 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6311 from, false_val),
6312 pc_rtx, pc_rtx, 0, 0, 0);
6313
6314 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6315 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6316
6317 true_rtx = XEXP (x, 1);
6318 false_rtx = XEXP (x, 2);
6319 true_code = GET_CODE (cond);
6320 }
6321
6322 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6323 reversed, do so to avoid needing two sets of patterns for
6324 subtract-and-branch insns. Similarly if we have a constant in the true
6325 arm, the false arm is the same as the first operand of the comparison, or
6326 the false arm is more complicated than the true arm. */
6327
6328 if (comparison_p
6329 && reversed_comparison_code (cond, NULL) != UNKNOWN
6330 && (true_rtx == pc_rtx
6331 || (CONSTANT_P (true_rtx)
6332 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6333 || true_rtx == const0_rtx
6334 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6335 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6336 && !OBJECT_P (false_rtx))
6337 || reg_mentioned_p (true_rtx, false_rtx)
6338 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6339 {
6340 true_code = reversed_comparison_code (cond, NULL);
6341 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6342 SUBST (XEXP (x, 1), false_rtx);
6343 SUBST (XEXP (x, 2), true_rtx);
6344
6345 std::swap (true_rtx, false_rtx);
6346 cond = XEXP (x, 0);
6347
6348 /* It is possible that the conditional has been simplified out. */
6349 true_code = GET_CODE (cond);
6350 comparison_p = COMPARISON_P (cond);
6351 }
6352
6353 /* If the two arms are identical, we don't need the comparison. */
6354
6355 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6356 return true_rtx;
6357
6358 /* Convert a == b ? b : a to "a". */
6359 if (true_code == EQ && ! side_effects_p (cond)
6360 && !HONOR_NANS (mode)
6361 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6362 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6363 return false_rtx;
6364 else if (true_code == NE && ! side_effects_p (cond)
6365 && !HONOR_NANS (mode)
6366 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6367 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6368 return true_rtx;
6369
6370 /* Look for cases where we have (abs x) or (neg (abs X)). */
6371
6372 if (GET_MODE_CLASS (mode) == MODE_INT
6373 && comparison_p
6374 && XEXP (cond, 1) == const0_rtx
6375 && GET_CODE (false_rtx) == NEG
6376 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6377 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6378 && ! side_effects_p (true_rtx))
6379 switch (true_code)
6380 {
6381 case GT:
6382 case GE:
6383 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6384 case LT:
6385 case LE:
6386 return
6387 simplify_gen_unary (NEG, mode,
6388 simplify_gen_unary (ABS, mode, true_rtx, mode),
6389 mode);
6390 default:
6391 break;
6392 }
6393
6394 /* Look for MIN or MAX. */
6395
6396 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6397 && comparison_p
6398 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6399 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6400 && ! side_effects_p (cond))
6401 switch (true_code)
6402 {
6403 case GE:
6404 case GT:
6405 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6406 case LE:
6407 case LT:
6408 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6409 case GEU:
6410 case GTU:
6411 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6412 case LEU:
6413 case LTU:
6414 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6415 default:
6416 break;
6417 }
6418
6419 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6420 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6421 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6422 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6423 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6424 neither 1 nor -1, but it isn't worth checking for. */
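/* For example, with STORE_FLAG_VALUE of 1,
   (if_then_else (eq A B) (plus Z (const_int 4)) Z)
   can be rewritten as (plus Z (mult (eq A B) (const_int 4))).  */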
6425
6426 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6427 && comparison_p
6428 && GET_MODE_CLASS (mode) == MODE_INT
6429 && ! side_effects_p (x))
6430 {
6431 rtx t = make_compound_operation (true_rtx, SET);
6432 rtx f = make_compound_operation (false_rtx, SET);
6433 rtx cond_op0 = XEXP (cond, 0);
6434 rtx cond_op1 = XEXP (cond, 1);
6435 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6436 machine_mode m = mode;
6437 rtx z = 0, c1 = NULL_RTX;
6438
6439 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6440 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6441 || GET_CODE (t) == ASHIFT
6442 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6443 && rtx_equal_p (XEXP (t, 0), f))
6444 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6445
6446 /* If an identity-zero op is commutative, check whether there
6447 would be a match if we swapped the operands. */
6448 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6449 || GET_CODE (t) == XOR)
6450 && rtx_equal_p (XEXP (t, 1), f))
6451 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6452 else if (GET_CODE (t) == SIGN_EXTEND
6453 && (GET_CODE (XEXP (t, 0)) == PLUS
6454 || GET_CODE (XEXP (t, 0)) == MINUS
6455 || GET_CODE (XEXP (t, 0)) == IOR
6456 || GET_CODE (XEXP (t, 0)) == XOR
6457 || GET_CODE (XEXP (t, 0)) == ASHIFT
6458 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6459 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6460 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6461 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6462 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6463 && (num_sign_bit_copies (f, GET_MODE (f))
6464 > (unsigned int)
6465 (GET_MODE_PRECISION (mode)
6466 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6467 {
6468 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6469 extend_op = SIGN_EXTEND;
6470 m = GET_MODE (XEXP (t, 0));
6471 }
6472 else if (GET_CODE (t) == SIGN_EXTEND
6473 && (GET_CODE (XEXP (t, 0)) == PLUS
6474 || GET_CODE (XEXP (t, 0)) == IOR
6475 || GET_CODE (XEXP (t, 0)) == XOR)
6476 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6477 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6478 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6479 && (num_sign_bit_copies (f, GET_MODE (f))
6480 > (unsigned int)
6481 (GET_MODE_PRECISION (mode)
6482 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6483 {
6484 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6485 extend_op = SIGN_EXTEND;
6486 m = GET_MODE (XEXP (t, 0));
6487 }
6488 else if (GET_CODE (t) == ZERO_EXTEND
6489 && (GET_CODE (XEXP (t, 0)) == PLUS
6490 || GET_CODE (XEXP (t, 0)) == MINUS
6491 || GET_CODE (XEXP (t, 0)) == IOR
6492 || GET_CODE (XEXP (t, 0)) == XOR
6493 || GET_CODE (XEXP (t, 0)) == ASHIFT
6494 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6495 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6496 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6497 && HWI_COMPUTABLE_MODE_P (mode)
6498 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6499 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6500 && ((nonzero_bits (f, GET_MODE (f))
6501 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6502 == 0))
6503 {
6504 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6505 extend_op = ZERO_EXTEND;
6506 m = GET_MODE (XEXP (t, 0));
6507 }
6508 else if (GET_CODE (t) == ZERO_EXTEND
6509 && (GET_CODE (XEXP (t, 0)) == PLUS
6510 || GET_CODE (XEXP (t, 0)) == IOR
6511 || GET_CODE (XEXP (t, 0)) == XOR)
6512 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6513 && HWI_COMPUTABLE_MODE_P (mode)
6514 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6515 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6516 && ((nonzero_bits (f, GET_MODE (f))
6517 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6518 == 0))
6519 {
6520 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6521 extend_op = ZERO_EXTEND;
6522 m = GET_MODE (XEXP (t, 0));
6523 }
6524
6525 if (z)
6526 {
6527 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6528 cond_op0, cond_op1),
6529 pc_rtx, pc_rtx, 0, 0, 0);
6530 temp = simplify_gen_binary (MULT, m, temp,
6531 simplify_gen_binary (MULT, m, c1,
6532 const_true_rtx));
6533 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6534 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6535
6536 if (extend_op != UNKNOWN)
6537 temp = simplify_gen_unary (extend_op, mode, temp, m);
6538
6539 return temp;
6540 }
6541 }
6542
6543 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6544 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6545 negation of a single bit, we can convert this operation to a shift. We
6546 can actually do this more generally, but it doesn't seem worth it. */
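/* E.g. if A is known to be 0 or 1,
   (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
   becomes (ashift A (const_int 3)).  */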
6547
6548 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6549 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6550 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6551 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6552 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6553 == GET_MODE_PRECISION (mode))
6554 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6555 return
6556 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6557 gen_lowpart (mode, XEXP (cond, 0)), i);
6558
6559 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6560 non-zero bit in A is C1. */
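/* Illustrative sketch: if A is a QImode value whose only possibly nonzero
   bit is 0x40 and C1 is (const_int 0x40), the SImode result is simply
   (zero_extend:SI A).  */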
6561 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6562 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6563 && INTEGRAL_MODE_P (GET_MODE (XEXP (cond, 0)))
6564 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6565 == nonzero_bits (XEXP (cond, 0), GET_MODE (XEXP (cond, 0)))
6566 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6567 {
6568 rtx val = XEXP (cond, 0);
6569 machine_mode val_mode = GET_MODE (val);
6570 if (val_mode == mode)
6571 return val;
6572 else if (GET_MODE_PRECISION (val_mode) < GET_MODE_PRECISION (mode))
6573 return simplify_gen_unary (ZERO_EXTEND, mode, val, val_mode);
6574 }
6575
6576 return x;
6577 }
6578 \f
6579 /* Simplify X, a SET expression. Return the new expression. */
6580
6581 static rtx
6582 simplify_set (rtx x)
6583 {
6584 rtx src = SET_SRC (x);
6585 rtx dest = SET_DEST (x);
6586 machine_mode mode
6587 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6588 rtx_insn *other_insn;
6589 rtx *cc_use;
6590
6591 /* (set (pc) (return)) gets written as (return). */
6592 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6593 return src;
6594
6595 /* Now that we know for sure which bits of SRC we are using, see if we can
6596 simplify the expression for the object knowing that we only need the
6597 low-order bits. */
6598
6599 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6600 {
6601 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6602 SUBST (SET_SRC (x), src);
6603 }
6604
6605 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6606 the comparison result and try to simplify it unless we already have used
6607 undobuf.other_insn. */
6608 if ((GET_MODE_CLASS (mode) == MODE_CC
6609 || GET_CODE (src) == COMPARE
6610 || CC0_P (dest))
6611 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6612 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6613 && COMPARISON_P (*cc_use)
6614 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6615 {
6616 enum rtx_code old_code = GET_CODE (*cc_use);
6617 enum rtx_code new_code;
6618 rtx op0, op1, tmp;
6619 int other_changed = 0;
6620 rtx inner_compare = NULL_RTX;
6621 machine_mode compare_mode = GET_MODE (dest);
6622
6623 if (GET_CODE (src) == COMPARE)
6624 {
6625 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6626 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6627 {
6628 inner_compare = op0;
6629 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6630 }
6631 }
6632 else
6633 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6634
6635 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6636 op0, op1);
6637 if (!tmp)
6638 new_code = old_code;
6639 else if (!CONSTANT_P (tmp))
6640 {
6641 new_code = GET_CODE (tmp);
6642 op0 = XEXP (tmp, 0);
6643 op1 = XEXP (tmp, 1);
6644 }
6645 else
6646 {
6647 rtx pat = PATTERN (other_insn);
6648 undobuf.other_insn = other_insn;
6649 SUBST (*cc_use, tmp);
6650
6651 /* Attempt to simplify CC user. */
6652 if (GET_CODE (pat) == SET)
6653 {
6654 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6655 if (new_rtx != NULL_RTX)
6656 SUBST (SET_SRC (pat), new_rtx);
6657 }
6658
6659 /* Convert X into a no-op move. */
6660 SUBST (SET_DEST (x), pc_rtx);
6661 SUBST (SET_SRC (x), pc_rtx);
6662 return x;
6663 }
6664
6665 /* Simplify our comparison, if possible. */
6666 new_code = simplify_comparison (new_code, &op0, &op1);
6667
6668 #ifdef SELECT_CC_MODE
6669 /* If this machine has CC modes other than CCmode, check to see if we
6670 need to use a different CC mode here. */
6671 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6672 compare_mode = GET_MODE (op0);
6673 else if (inner_compare
6674 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6675 && new_code == old_code
6676 && op0 == XEXP (inner_compare, 0)
6677 && op1 == XEXP (inner_compare, 1))
6678 compare_mode = GET_MODE (inner_compare);
6679 else
6680 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6681
6682 /* If the mode changed, we have to change SET_DEST, the mode in the
6683 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6684 a hard register, just build new versions with the proper mode. If it
6685 is a pseudo, we lose unless it is the only time we set the pseudo, in
6686 which case we can safely change its mode. */
6687 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6688 {
6689 if (can_change_dest_mode (dest, 0, compare_mode))
6690 {
6691 unsigned int regno = REGNO (dest);
6692 rtx new_dest;
6693
6694 if (regno < FIRST_PSEUDO_REGISTER)
6695 new_dest = gen_rtx_REG (compare_mode, regno);
6696 else
6697 {
6698 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6699 new_dest = regno_reg_rtx[regno];
6700 }
6701
6702 SUBST (SET_DEST (x), new_dest);
6703 SUBST (XEXP (*cc_use, 0), new_dest);
6704 other_changed = 1;
6705
6706 dest = new_dest;
6707 }
6708 }
6709 #endif /* SELECT_CC_MODE */
6710
6711 /* If the code changed, we have to build a new comparison in
6712 undobuf.other_insn. */
6713 if (new_code != old_code)
6714 {
6715 int other_changed_previously = other_changed;
6716 unsigned HOST_WIDE_INT mask;
6717 rtx old_cc_use = *cc_use;
6718
6719 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6720 dest, const0_rtx));
6721 other_changed = 1;
6722
6723 /* If the only change we made was to change an EQ into an NE or
6724 vice versa, OP0 has only one bit that might be nonzero, and OP1
6725 is zero, check if changing the user of the condition code will
6726 produce a valid insn. If it won't, we can keep the original code
6727 in that insn by surrounding our operation with an XOR. */
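/* Sketch of the idea (hypothetical values): if OP0's only possibly nonzero
   bit is 4 and flipping EQ/NE makes the user insn unrecognizable, we keep
   the original code there and compensate by rewriting OP0 as
   (xor OP0 (const_int 4)).  */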
6728
6729 if (((old_code == NE && new_code == EQ)
6730 || (old_code == EQ && new_code == NE))
6731 && ! other_changed_previously && op1 == const0_rtx
6732 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6733 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6734 {
6735 rtx pat = PATTERN (other_insn), note = 0;
6736
6737 if ((recog_for_combine (&pat, other_insn, &note) < 0
6738 && ! check_asm_operands (pat)))
6739 {
6740 *cc_use = old_cc_use;
6741 other_changed = 0;
6742
6743 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6744 gen_int_mode (mask,
6745 GET_MODE (op0)));
6746 }
6747 }
6748 }
6749
6750 if (other_changed)
6751 undobuf.other_insn = other_insn;
6752
6753 /* Don't generate a compare of a CC with 0, just use that CC. */
6754 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6755 {
6756 SUBST (SET_SRC (x), op0);
6757 src = SET_SRC (x);
6758 }
6759 /* Otherwise, if we didn't previously have the same COMPARE we
6760 want, create it from scratch. */
6761 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6762 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6763 {
6764 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6765 src = SET_SRC (x);
6766 }
6767 }
6768 else
6769 {
6770 /* Get SET_SRC in a form where we have placed back any
6771 compound expressions. Then do the checks below. */
6772 src = make_compound_operation (src, SET);
6773 SUBST (SET_SRC (x), src);
6774 }
6775
6776 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6777 and X being a REG or (subreg (reg)), we may be able to convert this to
6778 (set (subreg:m2 x) (op)).
6779
6780 We can always do this if M1 is narrower than M2 because that means that
6781 we only care about the low bits of the result.
6782
6783 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6784 perform a narrower operation than requested since the high-order bits will
6785 be undefined. On machines where it is defined, this transformation is safe
6786 as long as M1 and M2 have the same number of words. */
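/* A hypothetical instance: (set (reg:QI R) (subreg:QI (plus:SI A B) 0))
   can become (set (subreg:SI (reg:QI R) 0) (plus:SI A B)), since QImode is
   narrower than SImode and both modes fit in a single word.  */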
6787
6788 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6789 && !OBJECT_P (SUBREG_REG (src))
6790 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6791 / UNITS_PER_WORD)
6792 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6793 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6794 && (WORD_REGISTER_OPERATIONS
6795 || (GET_MODE_SIZE (GET_MODE (src))
6796 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6797 #ifdef CANNOT_CHANGE_MODE_CLASS
6798 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6799 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6800 GET_MODE (SUBREG_REG (src)),
6801 GET_MODE (src)))
6802 #endif
6803 && (REG_P (dest)
6804 || (GET_CODE (dest) == SUBREG
6805 && REG_P (SUBREG_REG (dest)))))
6806 {
6807 SUBST (SET_DEST (x),
6808 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6809 dest));
6810 SUBST (SET_SRC (x), SUBREG_REG (src));
6811
6812 src = SET_SRC (x), dest = SET_DEST (x);
6813 }
6814
6815 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6816 in SRC. */
6817 if (dest == cc0_rtx
6818 && GET_CODE (src) == SUBREG
6819 && subreg_lowpart_p (src)
6820 && (GET_MODE_PRECISION (GET_MODE (src))
6821 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6822 {
6823 rtx inner = SUBREG_REG (src);
6824 machine_mode inner_mode = GET_MODE (inner);
6825
6826 /* Here we make sure that the sign bit is known to be clear. */
6827 if (val_signbit_known_clear_p (GET_MODE (src),
6828 nonzero_bits (inner, inner_mode)))
6829 {
6830 SUBST (SET_SRC (x), inner);
6831 src = SET_SRC (x);
6832 }
6833 }
6834
6835 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6836 would require a paradoxical subreg. Replace the subreg with a
6837 zero_extend to avoid the reload that would otherwise be required. */
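/* E.g. (purely illustrative): (set FOO (subreg:DI (mem:SI BAR) 0)) would
   become (set FOO (zero_extend:DI (mem:SI BAR))) on a target whose
   load_extend_op for SImode is ZERO_EXTEND.  */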
6838
6839 enum rtx_code extend_op;
6840 if (paradoxical_subreg_p (src)
6841 && MEM_P (SUBREG_REG (src))
6842 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6843 {
6844 SUBST (SET_SRC (x),
6845 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6846
6847 src = SET_SRC (x);
6848 }
6849
6850 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6851 are comparing an item known to be 0 or -1 against 0, use a logical
6852 operation instead. Check for one of the arms being an IOR of the other
6853 arm with some value. We compute three terms to be IOR'ed together. In
6854 practice, at most two will be nonzero. Then we do the IOR's. */
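/* Illustrative sketch: with A known to be 0 or -1, something like
   (set X (if_then_else (ne A (const_int 0)) C B)) is rewritten as
   (set X (ior (and A C) (and (not A) B))).  */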
6855
6856 if (GET_CODE (dest) != PC
6857 && GET_CODE (src) == IF_THEN_ELSE
6858 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6859 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6860 && XEXP (XEXP (src, 0), 1) == const0_rtx
6861 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6862 && (!HAVE_conditional_move
6863 || ! can_conditionally_move_p (GET_MODE (src)))
6864 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6865 GET_MODE (XEXP (XEXP (src, 0), 0)))
6866 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6867 && ! side_effects_p (src))
6868 {
6869 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6870 ? XEXP (src, 1) : XEXP (src, 2));
6871 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6872 ? XEXP (src, 2) : XEXP (src, 1));
6873 rtx term1 = const0_rtx, term2, term3;
6874
6875 if (GET_CODE (true_rtx) == IOR
6876 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6877 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6878 else if (GET_CODE (true_rtx) == IOR
6879 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6880 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6881 else if (GET_CODE (false_rtx) == IOR
6882 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6883 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6884 else if (GET_CODE (false_rtx) == IOR
6885 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6886 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6887
6888 term2 = simplify_gen_binary (AND, GET_MODE (src),
6889 XEXP (XEXP (src, 0), 0), true_rtx);
6890 term3 = simplify_gen_binary (AND, GET_MODE (src),
6891 simplify_gen_unary (NOT, GET_MODE (src),
6892 XEXP (XEXP (src, 0), 0),
6893 GET_MODE (src)),
6894 false_rtx);
6895
6896 SUBST (SET_SRC (x),
6897 simplify_gen_binary (IOR, GET_MODE (src),
6898 simplify_gen_binary (IOR, GET_MODE (src),
6899 term1, term2),
6900 term3));
6901
6902 src = SET_SRC (x);
6903 }
6904
6905 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6906 whole thing fail. */
6907 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6908 return src;
6909 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6910 return dest;
6911 else
6912 /* Convert this into a field assignment operation, if possible. */
6913 return make_field_assignment (x);
6914 }
6915 \f
6916 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6917 result. */
6918
6919 static rtx
6920 simplify_logical (rtx x)
6921 {
6922 machine_mode mode = GET_MODE (x);
6923 rtx op0 = XEXP (x, 0);
6924 rtx op1 = XEXP (x, 1);
6925
6926 switch (GET_CODE (x))
6927 {
6928 case AND:
6929 /* We can call simplify_and_const_int only if we don't lose
6930 any (sign) bits when converting INTVAL (op1) to
6931 "unsigned HOST_WIDE_INT". */
6932 if (CONST_INT_P (op1)
6933 && (HWI_COMPUTABLE_MODE_P (mode)
6934 || INTVAL (op1) > 0))
6935 {
6936 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6937 if (GET_CODE (x) != AND)
6938 return x;
6939
6940 op0 = XEXP (x, 0);
6941 op1 = XEXP (x, 1);
6942 }
6943
6944 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6945 apply the distributive law and then the inverse distributive
6946 law to see if things simplify. */
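/* E.g. (a rough sketch): (and (ior A B) C) is tried as
   (ior (and A C) (and B C)) and kept when the pieces simplify.  */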
6947 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6948 {
6949 rtx result = distribute_and_simplify_rtx (x, 0);
6950 if (result)
6951 return result;
6952 }
6953 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6954 {
6955 rtx result = distribute_and_simplify_rtx (x, 1);
6956 if (result)
6957 return result;
6958 }
6959 break;
6960
6961 case IOR:
6962 /* If we have (ior (and A B) C), apply the distributive law and then
6963 the inverse distributive law to see if things simplify. */
6964
6965 if (GET_CODE (op0) == AND)
6966 {
6967 rtx result = distribute_and_simplify_rtx (x, 0);
6968 if (result)
6969 return result;
6970 }
6971
6972 if (GET_CODE (op1) == AND)
6973 {
6974 rtx result = distribute_and_simplify_rtx (x, 1);
6975 if (result)
6976 return result;
6977 }
6978 break;
6979
6980 default:
6981 gcc_unreachable ();
6982 }
6983
6984 return x;
6985 }
6986 \f
6987 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6988 operations" because they can be replaced with two more basic operations.
6989 ZERO_EXTEND is also considered "compound" because it can be replaced with
6990 an AND operation, which is simpler, though only one operation.
6991
6992 The function expand_compound_operation is called with an rtx expression
6993 and will convert it to the appropriate shifts and AND operations,
6994 simplifying at each stage.
6995
6996 The function make_compound_operation is called to convert an expression
6997 consisting of shifts and ANDs into the equivalent compound expression.
6998 It is the inverse of this function, loosely speaking. */
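/* A rough illustration (not lifted from any target): expanding
   (zero_extend:SI (reg:QI R)) yields, after simplification, something like
   (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)), while
   make_compound_operation performs the reverse rewrite.  */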
6999
7000 static rtx
7001 expand_compound_operation (rtx x)
7002 {
7003 unsigned HOST_WIDE_INT pos = 0, len;
7004 int unsignedp = 0;
7005 unsigned int modewidth;
7006 rtx tem;
7007
7008 switch (GET_CODE (x))
7009 {
7010 case ZERO_EXTEND:
7011 unsignedp = 1;
7012 /* FALLTHRU */
7013 case SIGN_EXTEND:
7014 /* We can't necessarily use a const_int for a multiword mode;
7015 it depends on implicitly extending the value.
7016 Since we don't know the right way to extend it,
7017 we can't tell whether the implicit way is right.
7018
7019 Even for a mode that is no wider than a const_int,
7020 we can't win, because we need to sign extend one of its bits through
7021 the rest of it, and we don't know which bit. */
7022 if (CONST_INT_P (XEXP (x, 0)))
7023 return x;
7024
7025 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7026 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7027 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7028 reloaded. If not for that, MEM's would very rarely be safe.
7029
7030 Reject MODEs bigger than a word, because we might not be able
7031 to reference a two-register group starting with an arbitrary register
7032 (and currently gen_lowpart might crash for a SUBREG). */
7033
7034 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
7035 return x;
7036
7037 /* Reject MODEs that aren't scalar integers because turning vector
7038 or complex modes into shifts causes problems. */
7039
7040 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
7041 return x;
7042
7043 len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
7044 /* If the inner object has VOIDmode (the only way this can happen
7045 is if it is an ASM_OPERANDS), we can't do anything since we don't
7046 know how much masking to do. */
7047 if (len == 0)
7048 return x;
7049
7050 break;
7051
7052 case ZERO_EXTRACT:
7053 unsignedp = 1;
7054
7055 /* fall through */
7056
7057 case SIGN_EXTRACT:
7058 /* If the operand is a CLOBBER, just return it. */
7059 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7060 return XEXP (x, 0);
7061
7062 if (!CONST_INT_P (XEXP (x, 1))
7063 || !CONST_INT_P (XEXP (x, 2))
7064 || GET_MODE (XEXP (x, 0)) == VOIDmode)
7065 return x;
7066
7067 /* Reject MODEs that aren't scalar integers because turning vector
7068 or complex modes into shifts causes problems. */
7069
7070 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
7071 return x;
7072
7073 len = INTVAL (XEXP (x, 1));
7074 pos = INTVAL (XEXP (x, 2));
7075
7076 /* This should stay within the object being extracted, fail otherwise. */
7077 if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
7078 return x;
7079
7080 if (BITS_BIG_ENDIAN)
7081 pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
7082
7083 break;
7084
7085 default:
7086 return x;
7087 }
7088 /* Convert sign extension to zero extension, if we know that the high
7089 bit is not set, as this is easier to optimize. It will be converted
7090 back to cheaper alternative in make_extraction. */
7091 if (GET_CODE (x) == SIGN_EXTEND
7092 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7093 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
7094 & ~(((unsigned HOST_WIDE_INT)
7095 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
7096 >> 1))
7097 == 0)))
7098 {
7099 machine_mode mode = GET_MODE (x);
7100 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7101 rtx temp2 = expand_compound_operation (temp);
7102
7103 /* Make sure this is a profitable operation. */
7104 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7105 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7106 return temp2;
7107 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7108 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7109 return temp;
7110 else
7111 return x;
7112 }
7113
7114 /* We can optimize some special cases of ZERO_EXTEND. */
7115 if (GET_CODE (x) == ZERO_EXTEND)
7116 {
7117 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7118 know that the last value didn't have any inappropriate bits
7119 set. */
7120 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7121 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7122 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7123 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
7124 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7125 return XEXP (XEXP (x, 0), 0);
7126
7127 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7128 if (GET_CODE (XEXP (x, 0)) == SUBREG
7129 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7130 && subreg_lowpart_p (XEXP (x, 0))
7131 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7132 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
7133 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7134 return SUBREG_REG (XEXP (x, 0));
7135
7136 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7137 is a comparison and STORE_FLAG_VALUE permits. This is like
7138 the first case, but it works even when GET_MODE (x) is larger
7139 than HOST_WIDE_INT. */
7140 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7141 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7142 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7143 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7144 <= HOST_BITS_PER_WIDE_INT)
7145 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7146 return XEXP (XEXP (x, 0), 0);
7147
7148 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7149 if (GET_CODE (XEXP (x, 0)) == SUBREG
7150 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7151 && subreg_lowpart_p (XEXP (x, 0))
7152 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7153 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7154 <= HOST_BITS_PER_WIDE_INT)
7155 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7156 return SUBREG_REG (XEXP (x, 0));
7157
7158 }
7159
7160 /* If we reach here, we want to return a pair of shifts. The inner
7161 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7162 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7163 logical depending on the value of UNSIGNEDP.
7164
7165 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7166 converted into an AND of a shift.
7167
7168 We must check for the case where the left shift would have a negative
7169 count. This can happen in a case like (x >> 31) & 255 on machines
7170 that can't shift by a constant. On those machines, we would first
7171 combine the shift with the AND to produce a variable-position
7172 extraction. Then the constant of 31 would be substituted in
7173 to produce such a position. */
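/* Concretely (a sketch, assuming BITS_BIG_ENDIAN is 0 and 32-bit SImode):
   for (sign_extract:SI X (const_int 8) (const_int 3)), POS = 3 and LEN = 8,
   so the result is (ashiftrt:SI (ashift:SI X (const_int 21))
   (const_int 24)).  */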
7174
7175 modewidth = GET_MODE_PRECISION (GET_MODE (x));
7176 if (modewidth >= pos + len)
7177 {
7178 machine_mode mode = GET_MODE (x);
7179 tem = gen_lowpart (mode, XEXP (x, 0));
7180 if (!tem || GET_CODE (tem) == CLOBBER)
7181 return x;
7182 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7183 tem, modewidth - pos - len);
7184 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7185 mode, tem, modewidth - len);
7186 }
7187 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7188 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
7189 simplify_shift_const (NULL_RTX, LSHIFTRT,
7190 GET_MODE (x),
7191 XEXP (x, 0), pos),
7192 (HOST_WIDE_INT_1U << len) - 1);
7193 else
7194 /* Any other cases we can't handle. */
7195 return x;
7196
7197 /* If we couldn't do this for some reason, return the original
7198 expression. */
7199 if (GET_CODE (tem) == CLOBBER)
7200 return x;
7201
7202 return tem;
7203 }
7204 \f
7205 /* X is a SET which contains an assignment of one object into
7206 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7207 or certain SUBREGS). If possible, convert it into a series of
7208 logical operations.
7209
7210 We half-heartedly support variable positions, but do not at all
7211 support variable lengths. */
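/* Illustrative sketch (assuming BITS_BIG_ENDIAN is 0): the assignment
   (set (zero_extract:SI R (const_int 8) (const_int 4)) Y) expands to roughly
   (set R (ior (and R (not (ashift (const_int 255) (const_int 4))))
   (ashift (and Y (const_int 255)) (const_int 4)))).  */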
7212
7213 static const_rtx
7214 expand_field_assignment (const_rtx x)
7215 {
7216 rtx inner;
7217 rtx pos; /* Always counts from low bit. */
7218 int len;
7219 rtx mask, cleared, masked;
7220 machine_mode compute_mode;
7221
7222 /* Loop until we find something we can't simplify. */
7223 while (1)
7224 {
7225 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7226 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7227 {
7228 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7229 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7230 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7231 }
7232 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7233 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7234 {
7235 inner = XEXP (SET_DEST (x), 0);
7236 len = INTVAL (XEXP (SET_DEST (x), 1));
7237 pos = XEXP (SET_DEST (x), 2);
7238
7239 /* A constant position should stay within the width of INNER. */
7240 if (CONST_INT_P (pos)
7241 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7242 break;
7243
7244 if (BITS_BIG_ENDIAN)
7245 {
7246 if (CONST_INT_P (pos))
7247 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7248 - INTVAL (pos));
7249 else if (GET_CODE (pos) == MINUS
7250 && CONST_INT_P (XEXP (pos, 1))
7251 && (INTVAL (XEXP (pos, 1))
7252 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7253 /* If position is ADJUST - X, new position is X. */
7254 pos = XEXP (pos, 0);
7255 else
7256 {
7257 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7258 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7259 gen_int_mode (prec - len,
7260 GET_MODE (pos)),
7261 pos);
7262 }
7263 }
7264 }
7265
7266 /* A SUBREG between two modes that occupy the same numbers of words
7267 can be done by moving the SUBREG to the source. */
7268 else if (GET_CODE (SET_DEST (x)) == SUBREG
7269 /* We need SUBREGs to compute nonzero_bits properly. */
7270 && nonzero_sign_valid
7271 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7272 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7273 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7274 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7275 {
7276 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7277 gen_lowpart
7278 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7279 SET_SRC (x)));
7280 continue;
7281 }
7282 else
7283 break;
7284
7285 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7286 inner = SUBREG_REG (inner);
7287
7288 compute_mode = GET_MODE (inner);
7289
7290 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7291 if (! SCALAR_INT_MODE_P (compute_mode))
7292 {
7293 machine_mode imode;
7294
7295 /* Don't do anything for vector or complex integral types. */
7296 if (! FLOAT_MODE_P (compute_mode))
7297 break;
7298
7299 /* Try to find an integral mode to pun with. */
7300 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7301 if (imode == BLKmode)
7302 break;
7303
7304 compute_mode = imode;
7305 inner = gen_lowpart (imode, inner);
7306 }
7307
7308 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7309 if (len >= HOST_BITS_PER_WIDE_INT)
7310 break;
7311
7312 /* Don't try to compute in too wide unsupported modes. */
7313 if (!targetm.scalar_mode_supported_p (compute_mode))
7314 break;
7315
7316 /* Now compute the equivalent expression. Make a copy of INNER
7317 for the SET_DEST in case it is a MEM into which we will substitute;
7318 we don't want shared RTL in that case. */
7319 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7320 compute_mode);
7321 cleared = simplify_gen_binary (AND, compute_mode,
7322 simplify_gen_unary (NOT, compute_mode,
7323 simplify_gen_binary (ASHIFT,
7324 compute_mode,
7325 mask, pos),
7326 compute_mode),
7327 inner);
7328 masked = simplify_gen_binary (ASHIFT, compute_mode,
7329 simplify_gen_binary (
7330 AND, compute_mode,
7331 gen_lowpart (compute_mode, SET_SRC (x)),
7332 mask),
7333 pos);
7334
7335 x = gen_rtx_SET (copy_rtx (inner),
7336 simplify_gen_binary (IOR, compute_mode,
7337 cleared, masked));
7338 }
7339
7340 return x;
7341 }
7342 \f
7343 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7344 it is an RTX that represents the (variable) starting position; otherwise,
7345 POS is the (constant) starting bit position. Both are counted from the LSB.
7346
7347 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7348
7349 IN_DEST is nonzero if this is a reference in the destination of a SET.
7350 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7351 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7352 be used.
7353
7354 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7355 ZERO_EXTRACT should be built even for bits starting at bit 0.
7356
7357 MODE is the desired mode of the result (if IN_DEST == 0).
7358
7359 The result is an RTX for the extraction or NULL_RTX if the target
7360 can't handle it. */
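/* As a loose, target-independent example: asking for 8 unsigned bits at
   position 8 of (reg:SI R) in a COMPARE context typically yields
   (zero_extract:SI (reg:SI R) (const_int 8) (const_int 8)); unsigned
   extractions starting at bit 0 are usually handled without building an
   extraction at all, since a plain AND or lowpart already expresses them.  */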
7361
7362 static rtx
7363 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7364 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7365 int in_dest, int in_compare)
7366 {
7367 /* This mode describes the size of the storage area
7368 to fetch the overall value from. Within that, we
7369 ignore the POS lowest bits, etc. */
7370 machine_mode is_mode = GET_MODE (inner);
7371 machine_mode inner_mode;
7372 machine_mode wanted_inner_mode;
7373 machine_mode wanted_inner_reg_mode = word_mode;
7374 machine_mode pos_mode = word_mode;
7375 machine_mode extraction_mode = word_mode;
7376 machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7377 rtx new_rtx = 0;
7378 rtx orig_pos_rtx = pos_rtx;
7379 HOST_WIDE_INT orig_pos;
7380
7381 if (pos_rtx && CONST_INT_P (pos_rtx))
7382 pos = INTVAL (pos_rtx), pos_rtx = 0;
7383
7384 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7385 {
7386 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7387 consider just the QI as the memory to extract from.
7388 The subreg adds or removes high bits; its mode is
7389 irrelevant to the meaning of this extraction,
7390 since POS and LEN count from the lsb. */
7391 if (MEM_P (SUBREG_REG (inner)))
7392 is_mode = GET_MODE (SUBREG_REG (inner));
7393 inner = SUBREG_REG (inner);
7394 }
7395 else if (GET_CODE (inner) == ASHIFT
7396 && CONST_INT_P (XEXP (inner, 1))
7397 && pos_rtx == 0 && pos == 0
7398 && len > UINTVAL (XEXP (inner, 1)))
7399 {
7400 /* We're extracting the least significant bits of an rtx
7401 (ashift X (const_int C)), where LEN > C. Extract the
7402 least significant (LEN - C) bits of X, giving an rtx
7403 whose mode is MODE, then shift it left C times. */
7404 new_rtx = make_extraction (mode, XEXP (inner, 0),
7405 0, 0, len - INTVAL (XEXP (inner, 1)),
7406 unsignedp, in_dest, in_compare);
7407 if (new_rtx != 0)
7408 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7409 }
7410 else if (GET_CODE (inner) == TRUNCATE)
7411 inner = XEXP (inner, 0);
7412
7413 inner_mode = GET_MODE (inner);
7414
7415 /* See if this can be done without an extraction. We never can if the
7416 width of the field is not the same as that of some integer mode. For
7417 registers, we can only avoid the extraction if the position is at the
7418 low-order bit and this is either not in the destination or we have the
7419 appropriate STRICT_LOW_PART operation available.
7420
7421 For MEM, we can avoid an extract if the field starts on an appropriate
7422 boundary and we can change the mode of the memory reference. */
7423
7424 if (tmode != BLKmode
7425 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7426 && !MEM_P (inner)
7427 && (pos == 0 || REG_P (inner))
7428 && (inner_mode == tmode
7429 || !REG_P (inner)
7430 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7431 || reg_truncated_to_mode (tmode, inner))
7432 && (! in_dest
7433 || (REG_P (inner)
7434 && have_insn_for (STRICT_LOW_PART, tmode))))
7435 || (MEM_P (inner) && pos_rtx == 0
7436 && (pos
7437 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7438 : BITS_PER_UNIT)) == 0
7439 /* We can't do this if we are widening INNER_MODE (it
7440 may not be aligned, for one thing). */
7441 && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7442 && (inner_mode == tmode
7443 || (! mode_dependent_address_p (XEXP (inner, 0),
7444 MEM_ADDR_SPACE (inner))
7445 && ! MEM_VOLATILE_P (inner))))))
7446 {
7447 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7448 field. If the original and current mode are the same, we need not
7449 adjust the offset. Otherwise, we do if bytes big endian.
7450
7451 If INNER is not a MEM, get a piece consisting of just the field
7452 of interest (in this case POS % BITS_PER_WORD must be 0). */
7453
7454 if (MEM_P (inner))
7455 {
7456 HOST_WIDE_INT offset;
7457
7458 /* POS counts from lsb, but make OFFSET count in memory order. */
7459 if (BYTES_BIG_ENDIAN)
7460 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7461 else
7462 offset = pos / BITS_PER_UNIT;
7463
7464 new_rtx = adjust_address_nv (inner, tmode, offset);
7465 }
7466 else if (REG_P (inner))
7467 {
7468 if (tmode != inner_mode)
7469 {
7470 /* We can't call gen_lowpart in a DEST since we
7471 always want a SUBREG (see below) and it would sometimes
7472 return a new hard register. */
7473 if (pos || in_dest)
7474 {
7475 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7476
7477 if (WORDS_BIG_ENDIAN
7478 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7479 final_word = ((GET_MODE_SIZE (inner_mode)
7480 - GET_MODE_SIZE (tmode))
7481 / UNITS_PER_WORD) - final_word;
7482
7483 final_word *= UNITS_PER_WORD;
7484 if (BYTES_BIG_ENDIAN
7485 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7486 final_word += (GET_MODE_SIZE (inner_mode)
7487 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7488
7489 /* Avoid creating invalid subregs, for example when
7490 simplifying (x>>32)&255. */
7491 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7492 return NULL_RTX;
7493
7494 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7495 }
7496 else
7497 new_rtx = gen_lowpart (tmode, inner);
7498 }
7499 else
7500 new_rtx = inner;
7501 }
7502 else
7503 new_rtx = force_to_mode (inner, tmode,
7504 len >= HOST_BITS_PER_WIDE_INT
7505 ? HOST_WIDE_INT_M1U
7506 : (HOST_WIDE_INT_1U << len) - 1, 0);
7507
7508 /* If this extraction is going into the destination of a SET,
7509 make a STRICT_LOW_PART unless we made a MEM. */
7510
7511 if (in_dest)
7512 return (MEM_P (new_rtx) ? new_rtx
7513 : (GET_CODE (new_rtx) != SUBREG
7514 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7515 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7516
7517 if (mode == tmode)
7518 return new_rtx;
7519
7520 if (CONST_SCALAR_INT_P (new_rtx))
7521 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7522 mode, new_rtx, tmode);
7523
7524 /* If we know that no extraneous bits are set, and that the high
7525 bit is not set, convert the extraction to the cheaper of
7526 sign and zero extension, which are equivalent in these cases. */
7527 if (flag_expensive_optimizations
7528 && (HWI_COMPUTABLE_MODE_P (tmode)
7529 && ((nonzero_bits (new_rtx, tmode)
7530 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7531 == 0)))
7532 {
7533 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7534 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7535
7536 /* Prefer ZERO_EXTENSION, since it gives more information to
7537 backends. */
7538 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7539 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7540 return temp;
7541 return temp1;
7542 }
7543
7544 /* Otherwise, sign- or zero-extend unless we already are in the
7545 proper mode. */
7546
7547 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7548 mode, new_rtx));
7549 }
7550
7551 /* Unless this is a COMPARE or we have a funny memory reference,
7552 don't do anything with zero-extending field extracts starting at
7553 the low-order bit since they are simple AND operations. */
7554 if (pos_rtx == 0 && pos == 0 && ! in_dest
7555 && ! in_compare && unsignedp)
7556 return 0;
7557
7558 /* If INNER is a MEM, reject this if we would be spanning bytes or
7559 if the position is not a constant and the length is not 1. In all
7560 other cases, we would only be going outside our object in cases when
7561 an original shift would have been undefined. */
7562 if (MEM_P (inner)
7563 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7564 || (pos_rtx != 0 && len != 1)))
7565 return 0;
7566
7567 enum extraction_pattern pattern = (in_dest ? EP_insv
7568 : unsignedp ? EP_extzv : EP_extv);
7569
7570 /* If INNER is not from memory, we want it to have the mode of a register
7571 extraction pattern's structure operand, or word_mode if there is no
7572 such pattern. The same applies to extraction_mode and pos_mode
7573 and their respective operands.
7574
7575 For memory, assume that the desired extraction_mode and pos_mode
7576 are the same as for a register operation, since at present we don't
7577 have named patterns for aligned memory structures. */
7578 struct extraction_insn insn;
7579 if (get_best_reg_extraction_insn (&insn, pattern,
7580 GET_MODE_BITSIZE (inner_mode), mode))
7581 {
7582 wanted_inner_reg_mode = insn.struct_mode;
7583 pos_mode = insn.pos_mode;
7584 extraction_mode = insn.field_mode;
7585 }
7586
7587 /* Never narrow an object, since that might not be safe. */
7588
7589 if (mode != VOIDmode
7590 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7591 extraction_mode = mode;
7592
7593 if (!MEM_P (inner))
7594 wanted_inner_mode = wanted_inner_reg_mode;
7595 else
7596 {
7597 /* Be careful not to go beyond the extracted object and maintain the
7598 natural alignment of the memory. */
7599 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7600 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7601 > GET_MODE_BITSIZE (wanted_inner_mode))
7602 {
7603 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7604 gcc_assert (wanted_inner_mode != VOIDmode);
7605 }
7606 }
7607
7608 orig_pos = pos;
7609
7610 if (BITS_BIG_ENDIAN)
7611 {
7612 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7613 BITS_BIG_ENDIAN style. If position is constant, compute new
7614 position. Otherwise, build subtraction.
7615 Note that POS is relative to the mode of the original argument.
7616 If it's a MEM we need to recompute POS relative to that.
7617 However, if we're extracting from (or inserting into) a register,
7618 we want to recompute POS relative to wanted_inner_mode. */
7619 int width = (MEM_P (inner)
7620 ? GET_MODE_BITSIZE (is_mode)
7621 : GET_MODE_BITSIZE (wanted_inner_mode));
7622
7623 if (pos_rtx == 0)
7624 pos = width - len - pos;
7625 else
7626 pos_rtx
7627 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7628 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7629 pos_rtx);
7630 /* POS may be less than 0 now, but we check for that below.
7631 Note that it can only be less than 0 if !MEM_P (inner). */
7632 }
7633
7634 /* If INNER has a wider mode, and this is a constant extraction, try to
7635 make it smaller and adjust the byte to point to the byte containing
7636 the value. */
7637 if (wanted_inner_mode != VOIDmode
7638 && inner_mode != wanted_inner_mode
7639 && ! pos_rtx
7640 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7641 && MEM_P (inner)
7642 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7643 && ! MEM_VOLATILE_P (inner))
7644 {
7645 int offset = 0;
7646
7647 /* The computations below will be correct if the machine is big
7648 endian in both bits and bytes or little endian in bits and bytes.
7649 If it is mixed, we must adjust. */
7650
7651 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7652 adjust OFFSET to compensate. */
7653 if (BYTES_BIG_ENDIAN
7654 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7655 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7656
7657 /* We can now move to the desired byte. */
7658 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7659 * GET_MODE_SIZE (wanted_inner_mode);
7660 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7661
7662 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7663 && is_mode != wanted_inner_mode)
7664 offset = (GET_MODE_SIZE (is_mode)
7665 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7666
7667 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7668 }
7669
7670 /* If INNER is not memory, get it into the proper mode. If we are changing
7671 its mode, POS must be a constant and smaller than the size of the new
7672 mode. */
7673 else if (!MEM_P (inner))
7674 {
7675 /* On the LHS, don't create paradoxical subregs implicitly truncating
7676 the register unless TRULY_NOOP_TRUNCATION. */
7677 if (in_dest
7678 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7679 wanted_inner_mode))
7680 return NULL_RTX;
7681
7682 if (GET_MODE (inner) != wanted_inner_mode
7683 && (pos_rtx != 0
7684 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7685 return NULL_RTX;
7686
7687 if (orig_pos < 0)
7688 return NULL_RTX;
7689
7690 inner = force_to_mode (inner, wanted_inner_mode,
7691 pos_rtx
7692 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7693 ? HOST_WIDE_INT_M1U
7694 : (((HOST_WIDE_INT_1U << len) - 1)
7695 << orig_pos),
7696 0);
7697 }
7698
7699 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7700 have to zero extend. Otherwise, we can just use a SUBREG. */
7701 if (pos_rtx != 0
7702 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7703 {
7704 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7705 GET_MODE (pos_rtx));
7706
7707 /* If we know that no extraneous bits are set, and that the high
7708 bit is not set, convert the extraction to the cheaper one - either
7709 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7710 cases. */
7711 if (flag_expensive_optimizations
7712 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7713 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7714 & ~(((unsigned HOST_WIDE_INT)
7715 GET_MODE_MASK (GET_MODE (pos_rtx)))
7716 >> 1))
7717 == 0)))
7718 {
7719 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7720 GET_MODE (pos_rtx));
7721
7722 /* Prefer ZERO_EXTENSION, since it gives more information to
7723 backends. */
7724 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7725 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7726 temp = temp1;
7727 }
7728 pos_rtx = temp;
7729 }
7730
7731 /* Make POS_RTX unless we already have it and it is correct. If we don't
7732 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7733 be a CONST_INT. */
7734 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7735 pos_rtx = orig_pos_rtx;
7736
7737 else if (pos_rtx == 0)
7738 pos_rtx = GEN_INT (pos);
7739
7740 /* Make the required operation. See if we can use existing rtx. */
7741 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7742 extraction_mode, inner, GEN_INT (len), pos_rtx);
7743 if (! in_dest)
7744 new_rtx = gen_lowpart (mode, new_rtx);
7745
7746 return new_rtx;
7747 }
7748 \f
7749 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7750 with any other operations in X. Return X without that shift if so. */
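/* A small worked sketch: with X = (plus (ashift Y (const_int 3)) (const_int 8))
   and COUNT = 2, the result is (plus (ashift Y (const_int 1)) (const_int 2)),
   i.e. an rtx that, shifted left by 2, reproduces X.  */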
7751
7752 static rtx
7753 extract_left_shift (rtx x, int count)
7754 {
7755 enum rtx_code code = GET_CODE (x);
7756 machine_mode mode = GET_MODE (x);
7757 rtx tem;
7758
7759 switch (code)
7760 {
7761 case ASHIFT:
7762 /* This is the shift itself. If it is wide enough, we will return
7763 either the value being shifted if the shift count is equal to
7764 COUNT or a shift for the difference. */
7765 if (CONST_INT_P (XEXP (x, 1))
7766 && INTVAL (XEXP (x, 1)) >= count)
7767 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7768 INTVAL (XEXP (x, 1)) - count);
7769 break;
7770
7771 case NEG: case NOT:
7772 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7773 return simplify_gen_unary (code, mode, tem, mode);
7774
7775 break;
7776
7777 case PLUS: case IOR: case XOR: case AND:
7778 /* If we can safely shift this constant and we find the inner shift,
7779 make a new operation. */
7780 if (CONST_INT_P (XEXP (x, 1))
7781 && (UINTVAL (XEXP (x, 1))
7782 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7783 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7784 {
7785 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7786 return simplify_gen_binary (code, mode, tem,
7787 gen_int_mode (val, mode));
7788 }
7789 break;
7790
7791 default:
7792 break;
7793 }
7794
7795 return 0;
7796 }
7797 \f
7798 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7799 level of the expression and MODE is its mode. IN_CODE is as for
7800 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7801 that should be used when recursing on operands of *X_PTR.
7802
7803 There are two possible actions:
7804
7805 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7806 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7807
7808 - Return a new rtx, which the caller returns directly. */
7809
7810 static rtx
7811 make_compound_operation_int (machine_mode mode, rtx *x_ptr,
7812 enum rtx_code in_code,
7813 enum rtx_code *next_code_ptr)
7814 {
7815 rtx x = *x_ptr;
7816 enum rtx_code next_code = *next_code_ptr;
7817 enum rtx_code code = GET_CODE (x);
7818 int mode_width = GET_MODE_PRECISION (mode);
7819 rtx rhs, lhs;
7820 rtx new_rtx = 0;
7821 int i;
7822 rtx tem;
7823 bool equality_comparison = false;
7824
7825 if (in_code == EQ)
7826 {
7827 equality_comparison = true;
7828 in_code = COMPARE;
7829 }
7830
7831 /* Process depending on the code of this operation. If NEW is set
7832 nonzero, it will be returned. */
7833
7834 switch (code)
7835 {
7836 case ASHIFT:
7837 /* Convert shifts by constants into multiplications if inside
7838 an address. */
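/* For instance (hypothetically), inside a MEM address the subexpression
   (ashift X (const_int 2)) is rewritten as (mult X (const_int 4)).  */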
7839 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7840 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7841 && INTVAL (XEXP (x, 1)) >= 0)
7842 {
7843 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7844 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7845
7846 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7847 if (GET_CODE (new_rtx) == NEG)
7848 {
7849 new_rtx = XEXP (new_rtx, 0);
7850 multval = -multval;
7851 }
7852 multval = trunc_int_for_mode (multval, mode);
7853 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7854 }
7855 break;
7856
7857 case PLUS:
7858 lhs = XEXP (x, 0);
7859 rhs = XEXP (x, 1);
7860 lhs = make_compound_operation (lhs, next_code);
7861 rhs = make_compound_operation (rhs, next_code);
7862 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7863 {
7864 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7865 XEXP (lhs, 1));
7866 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7867 }
7868 else if (GET_CODE (lhs) == MULT
7869 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7870 {
7871 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7872 simplify_gen_unary (NEG, mode,
7873 XEXP (lhs, 1),
7874 mode));
7875 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7876 }
7877 else
7878 {
7879 SUBST (XEXP (x, 0), lhs);
7880 SUBST (XEXP (x, 1), rhs);
7881 }
7882 maybe_swap_commutative_operands (x);
7883 return x;
7884
7885 case MINUS:
7886 lhs = XEXP (x, 0);
7887 rhs = XEXP (x, 1);
7888 lhs = make_compound_operation (lhs, next_code);
7889 rhs = make_compound_operation (rhs, next_code);
7890 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7891 {
7892 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7893 XEXP (rhs, 1));
7894 return simplify_gen_binary (PLUS, mode, tem, lhs);
7895 }
7896 else if (GET_CODE (rhs) == MULT
7897 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7898 {
7899 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7900 simplify_gen_unary (NEG, mode,
7901 XEXP (rhs, 1),
7902 mode));
7903 return simplify_gen_binary (PLUS, mode, tem, lhs);
7904 }
7905 else
7906 {
7907 SUBST (XEXP (x, 0), lhs);
7908 SUBST (XEXP (x, 1), rhs);
7909 return x;
7910 }
7911
7912 case AND:
7913 /* If the second operand is not a constant, we can't do anything
7914 with it. */
7915 if (!CONST_INT_P (XEXP (x, 1)))
7916 break;
7917
7918 /* If the constant is a power of two minus one and the first operand
7919 is a logical right shift, make an extraction. */
7920 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7921 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7922 {
7923 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7924 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7925 0, in_code == COMPARE);
7926 }
7927
7928 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7929 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7930 && subreg_lowpart_p (XEXP (x, 0))
7931 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7932 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7933 {
7934 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
7935 machine_mode inner_mode = GET_MODE (inner_x0);
7936 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
7937 new_rtx = make_extraction (inner_mode, new_rtx, 0,
7938 XEXP (inner_x0, 1),
7939 i, 1, 0, in_code == COMPARE);
7940
7941 if (new_rtx)
7942 {
7943 /* If we narrowed the mode when dropping the subreg, then
7944 we must zero-extend to keep the semantics of the AND. */
7945 if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
7946 ;
7947 else if (SCALAR_INT_MODE_P (inner_mode))
7948 new_rtx = simplify_gen_unary (ZERO_EXTEND, mode,
7949 new_rtx, inner_mode);
7950 else
7951 new_rtx = NULL;
7952 }
7953
7954 /* If that didn't give anything, see if the AND simplifies on
7955 its own. */
7956 if (!new_rtx && i >= 0)
7957 {
7958 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7959 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
7960 0, in_code == COMPARE);
7961 }
7962 }
7963 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7964 else if ((GET_CODE (XEXP (x, 0)) == XOR
7965 || GET_CODE (XEXP (x, 0)) == IOR)
7966 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7967 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7968 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7969 {
7970 /* Apply the distributive law, and then try to make extractions. */
7971 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7972 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7973 XEXP (x, 1)),
7974 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7975 XEXP (x, 1)));
7976 new_rtx = make_compound_operation (new_rtx, in_code);
7977 }
7978
7979 /* If we have (and (rotate X C) M) and C is larger than the number
7980 of bits in M, this is an extraction. */
7981
7982 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7983 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7984 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7985 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7986 {
7987 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7988 new_rtx = make_extraction (mode, new_rtx,
7989 (GET_MODE_PRECISION (mode)
7990 - INTVAL (XEXP (XEXP (x, 0), 1))),
7991 NULL_RTX, i, 1, 0, in_code == COMPARE);
7992 }
7993
7994 /* On machines without logical shifts, if the operand of the AND is
7995 a logical shift and our mask turns off all the propagated sign
7996 bits, we can replace the logical shift with an arithmetic shift. */
7997 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7998 && !have_insn_for (LSHIFTRT, mode)
7999 && have_insn_for (ASHIFTRT, mode)
8000 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8001 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8002 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8003 && mode_width <= HOST_BITS_PER_WIDE_INT)
8004 {
8005 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8006
8007 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8008 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8009 SUBST (XEXP (x, 0),
8010 gen_rtx_ASHIFTRT (mode,
8011 make_compound_operation
8012 (XEXP (XEXP (x, 0), 0), next_code),
8013 XEXP (XEXP (x, 0), 1)));
8014 }
8015
8016 /* If the constant is one less than a power of two, this might be
8017 representable by an extraction even if no shift is present.
8018 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8019 we are in a COMPARE. */
8020 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8021 new_rtx = make_extraction (mode,
8022 make_compound_operation (XEXP (x, 0),
8023 next_code),
8024 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8025
8026 /* If we are in a comparison and this is an AND with a power of two,
8027 convert this into the appropriate bit extract. */
8028 else if (in_code == COMPARE
8029 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8030 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8031 new_rtx = make_extraction (mode,
8032 make_compound_operation (XEXP (x, 0),
8033 next_code),
8034 i, NULL_RTX, 1, 1, 0, 1);
8035
8036 /* If the first operand is a paradoxical subreg of a register or memory and
8037 the constant (limited to the smaller mode) has only zero bits where
8038 the sub expression has known zero bits, this can be expressed as
8039 a zero_extend. */
8040 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8041 {
8042 rtx sub;
8043
8044 sub = XEXP (XEXP (x, 0), 0);
8045 machine_mode sub_mode = GET_MODE (sub);
8046 if ((REG_P (sub) || MEM_P (sub))
8047 && GET_MODE_PRECISION (sub_mode) < mode_width)
8048 {
8049 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8050 unsigned HOST_WIDE_INT mask;
8051
8052 /* Original AND constant with all the known zero bits set. */
8053 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8054 if ((mask & mode_mask) == mode_mask)
8055 {
8056 new_rtx = make_compound_operation (sub, next_code);
8057 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8058 GET_MODE_PRECISION (sub_mode),
8059 1, 0, in_code == COMPARE);
8060 }
8061 }
8062 }
8063
8064 break;
8065
8066 case LSHIFTRT:
8067 /* If the sign bit is known to be zero, replace this with an
8068 arithmetic shift. */
8069 if (have_insn_for (ASHIFTRT, mode)
8070 && ! have_insn_for (LSHIFTRT, mode)
8071 && mode_width <= HOST_BITS_PER_WIDE_INT
8072 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8073 {
8074 new_rtx = gen_rtx_ASHIFTRT (mode,
8075 make_compound_operation (XEXP (x, 0),
8076 next_code),
8077 XEXP (x, 1));
8078 break;
8079 }
8080
8081 /* fall through */
8082
8083 case ASHIFTRT:
8084 lhs = XEXP (x, 0);
8085 rhs = XEXP (x, 1);
8086
8087 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8088 this is a SIGN_EXTRACT. */
8089 if (CONST_INT_P (rhs)
8090 && GET_CODE (lhs) == ASHIFT
8091 && CONST_INT_P (XEXP (lhs, 1))
8092 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8093 && INTVAL (XEXP (lhs, 1)) >= 0
8094 && INTVAL (rhs) < mode_width)
8095 {
8096 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8097 new_rtx = make_extraction (mode, new_rtx,
8098 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8099 NULL_RTX, mode_width - INTVAL (rhs),
8100 code == LSHIFTRT, 0, in_code == COMPARE);
8101 break;
8102 }
8103
8104 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8105 If so, try to merge the shifts into a SIGN_EXTEND. We could
8106 also do this for some cases of SIGN_EXTRACT, but it doesn't
8107 seem worth the effort; the case checked for occurs on Alpha. */
8108
8109 if (!OBJECT_P (lhs)
8110 && ! (GET_CODE (lhs) == SUBREG
8111 && (OBJECT_P (SUBREG_REG (lhs))))
8112 && CONST_INT_P (rhs)
8113 && INTVAL (rhs) >= 0
8114 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8115 && INTVAL (rhs) < mode_width
8116 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
8117 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
8118 0, NULL_RTX, mode_width - INTVAL (rhs),
8119 code == LSHIFTRT, 0, in_code == COMPARE);
8120
8121 break;
8122
8123 case SUBREG:
8124 /* Call ourselves recursively on the inner expression. If we are
8125 narrowing the object and it has a different RTL code from
8126 what it originally did, do this SUBREG as a force_to_mode. */
8127 {
8128 rtx inner = SUBREG_REG (x), simplified;
8129 enum rtx_code subreg_code = in_code;
8130
8131 /* If the SUBREG is a masking of a logical right shift,
8132 make an extraction. */
8133 if (GET_CODE (inner) == LSHIFTRT
8134 && CONST_INT_P (XEXP (inner, 1))
8135 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8136 && (UINTVAL (XEXP (inner, 1))
8137 < GET_MODE_PRECISION (GET_MODE (inner)))
8138 && subreg_lowpart_p (x))
8139 {
8140 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8141 int width = GET_MODE_PRECISION (GET_MODE (inner))
8142 - INTVAL (XEXP (inner, 1));
8143 if (width > mode_width)
8144 width = mode_width;
8145 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8146 width, 1, 0, in_code == COMPARE);
8147 break;
8148 }
8149
8150 /* If in_code is COMPARE, it isn't always safe to pass it through
8151 to the recursive make_compound_operation call. */
8152 if (subreg_code == COMPARE
8153 && (!subreg_lowpart_p (x)
8154 || GET_CODE (inner) == SUBREG
8155 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8156 is (const_int 0), rather than
8157 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0). */
8158 || (GET_CODE (inner) == AND
8159 && CONST_INT_P (XEXP (inner, 1))
8160 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8161 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8162 >= GET_MODE_BITSIZE (mode))))
8163 subreg_code = SET;
8164
8165 tem = make_compound_operation (inner, subreg_code);
8166
8167 simplified
8168 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8169 if (simplified)
8170 tem = simplified;
8171
8172 if (GET_CODE (tem) != GET_CODE (inner)
8173 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8174 && subreg_lowpart_p (x))
8175 {
8176 rtx newer
8177 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8178
8179 /* If we have something other than a SUBREG, we might have
8180 done an expansion, so rerun ourselves. */
8181 if (GET_CODE (newer) != SUBREG)
8182 newer = make_compound_operation (newer, in_code);
8183
8184 /* force_to_mode can expand compounds. If it just re-expanded the
8185 compound, use gen_lowpart to convert to the desired mode. */
8186 if (rtx_equal_p (newer, x)
8187 /* Likewise if it re-expanded the compound only partially.
8188 This happens for SUBREG of ZERO_EXTRACT if they extract
8189 the same number of bits. */
8190 || (GET_CODE (newer) == SUBREG
8191 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8192 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8193 && GET_CODE (inner) == AND
8194 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8195 return gen_lowpart (GET_MODE (x), tem);
8196
8197 return newer;
8198 }
8199
8200 if (simplified)
8201 return tem;
8202 }
8203 break;
8204
8205 default:
8206 break;
8207 }
8208
8209 if (new_rtx)
8210 *x_ptr = gen_lowpart (mode, new_rtx);
8211 *next_code_ptr = next_code;
8212 return NULL_RTX;
8213 }
8214
8215 /* Look at the expression rooted at X. Look for expressions
8216 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8217 Form these expressions.
8218
8219 Return the new rtx, usually just X.
8220
8221 Also, for machines like the VAX that don't have logical shift insns,
8222 try to convert logical to arithmetic shift operations in cases where
8223 they are equivalent. This undoes the canonicalizations to logical
8224 shifts done elsewhere.
8225
8226 We try, as much as possible, to re-use rtl expressions to save memory.
8227
8228 IN_CODE says what kind of expression we are processing. Normally, it is
8229 SET. In a memory address it is MEM. When processing the arguments of
8230 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8231 precisely it is an equality comparison against zero. */
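
/* As a rough illustration of the rewrites performed here (not an
   exhaustive list), (and (lshiftrt X (const_int 8)) (const_int 255))
   is normally turned into a zero_extract of 8 bits starting at bit 8
   of X, and (lshiftrt (ashift X (const_int 24)) (const_int 24)) into
   a zero_extend of the low byte of X.  */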
8232
8233 rtx
8234 make_compound_operation (rtx x, enum rtx_code in_code)
8235 {
8236 enum rtx_code code = GET_CODE (x);
8237 const char *fmt;
8238 int i, j;
8239 enum rtx_code next_code;
8240 rtx new_rtx, tem;
8241
8242 /* Select the code to be used in recursive calls. Once we are inside an
8243 address, we stay there. If we have a comparison, set to COMPARE,
8244 but once inside, go back to our default of SET. */
8245
8246 next_code = (code == MEM ? MEM
8247 : ((code == COMPARE || COMPARISON_P (x))
8248 && XEXP (x, 1) == const0_rtx) ? COMPARE
8249 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8250
8251 if (SCALAR_INT_MODE_P (GET_MODE (x)))
8252 {
8253 rtx new_rtx = make_compound_operation_int (GET_MODE (x), &x,
8254 in_code, &next_code);
8255 if (new_rtx)
8256 return new_rtx;
8257 code = GET_CODE (x);
8258 }
8259
8260 /* Now recursively process each operand of this operation. We need to
8261 handle ZERO_EXTEND specially so that we don't lose track of the
8262 inner mode. */
8263 if (code == ZERO_EXTEND)
8264 {
8265 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8266 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8267 new_rtx, GET_MODE (XEXP (x, 0)));
8268 if (tem)
8269 return tem;
8270 SUBST (XEXP (x, 0), new_rtx);
8271 return x;
8272 }
8273
8274 fmt = GET_RTX_FORMAT (code);
8275 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8276 if (fmt[i] == 'e')
8277 {
8278 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8279 SUBST (XEXP (x, i), new_rtx);
8280 }
8281 else if (fmt[i] == 'E')
8282 for (j = 0; j < XVECLEN (x, i); j++)
8283 {
8284 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8285 SUBST (XVECEXP (x, i, j), new_rtx);
8286 }
8287
8288 maybe_swap_commutative_operands (x);
8289 return x;
8290 }
8291 \f
8292 /* Given M, see if it is a value that would select a field of bits
8293 within an item, but not the entire word. Return -1 if not.
8294 Otherwise, return the starting position of the field, where 0 is the
8295 low-order bit.
8296
8297 *PLEN is set to the length of the field. */
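
/* For instance (illustrative values): a mask of 0x78 yields position 3
   with *PLEN == 4, a mask of 0x01 yields position 0 with *PLEN == 1,
   and a mask of 0x50 yields -1 because its set bits are not
   contiguous.  */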
8298
8299 static int
8300 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8301 {
8302 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8303 int pos = m ? ctz_hwi (m) : -1;
8304 int len = 0;
8305
8306 if (pos >= 0)
8307 /* Now shift off the low-order zero bits and see if we have a
8308 power of two minus 1. */
8309 len = exact_log2 ((m >> pos) + 1);
8310
8311 if (len <= 0)
8312 pos = -1;
8313
8314 *plen = len;
8315 return pos;
8316 }
8317 \f
8318 /* If X refers to a register that equals REG in value, replace these
8319 references with REG. */
8320 static rtx
8321 canon_reg_for_combine (rtx x, rtx reg)
8322 {
8323 rtx op0, op1, op2;
8324 const char *fmt;
8325 int i;
8326 bool copied;
8327
8328 enum rtx_code code = GET_CODE (x);
8329 switch (GET_RTX_CLASS (code))
8330 {
8331 case RTX_UNARY:
8332 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8333 if (op0 != XEXP (x, 0))
8334 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8335 GET_MODE (reg));
8336 break;
8337
8338 case RTX_BIN_ARITH:
8339 case RTX_COMM_ARITH:
8340 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8341 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8342 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8343 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8344 break;
8345
8346 case RTX_COMPARE:
8347 case RTX_COMM_COMPARE:
8348 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8349 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8350 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8351 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8352 GET_MODE (op0), op0, op1);
8353 break;
8354
8355 case RTX_TERNARY:
8356 case RTX_BITFIELD_OPS:
8357 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8358 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8359 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8360 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8361 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8362 GET_MODE (op0), op0, op1, op2);
8363 /* FALLTHRU */
8364
8365 case RTX_OBJ:
8366 if (REG_P (x))
8367 {
8368 if (rtx_equal_p (get_last_value (reg), x)
8369 || rtx_equal_p (reg, get_last_value (x)))
8370 return reg;
8371 else
8372 break;
8373 }
8374
8375 /* fall through */
8376
8377 default:
8378 fmt = GET_RTX_FORMAT (code);
8379 copied = false;
8380 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8381 if (fmt[i] == 'e')
8382 {
8383 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8384 if (op != XEXP (x, i))
8385 {
8386 if (!copied)
8387 {
8388 copied = true;
8389 x = copy_rtx (x);
8390 }
8391 XEXP (x, i) = op;
8392 }
8393 }
8394 else if (fmt[i] == 'E')
8395 {
8396 int j;
8397 for (j = 0; j < XVECLEN (x, i); j++)
8398 {
8399 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8400 if (op != XVECEXP (x, i, j))
8401 {
8402 if (!copied)
8403 {
8404 copied = true;
8405 x = copy_rtx (x);
8406 }
8407 XVECEXP (x, i, j) = op;
8408 }
8409 }
8410 }
8411
8412 break;
8413 }
8414
8415 return x;
8416 }
8417
8418 /* Return X converted to MODE. If the value is already truncated to
8419 MODE we can just return a subreg even though in the general case we
8420 would need an explicit truncation. */
8421
8422 static rtx
8423 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8424 {
8425 if (!CONST_INT_P (x)
8426 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8427 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8428 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8429 {
8430 /* Bit-cast X into an integer mode. */
8431 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8432 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8433 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8434 x, GET_MODE (x));
8435 }
8436
8437 return gen_lowpart (mode, x);
8438 }
8439
8440 /* See if X can be simplified knowing that we will only refer to it in
8441 MODE and will only refer to those bits that are nonzero in MASK.
8442 If other bits are being computed or if masking operations are done
8443 that select a superset of the bits in MASK, they can sometimes be
8444 ignored.
8445
8446 Return a possibly simplified expression, but always convert X to
8447 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8448
8449 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8450 are all off in X. This is used when X will be complemented, by either
8451 NOT, NEG, or XOR. */
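
/* A small example of the intent (illustrative values): with MODE == QImode
   and MASK == 0x0f, (and X (const_int 255)) can be returned as just the
   low part of X, since the AND only clears bits that MASK says will never
   be looked at.  */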
8452
8453 static rtx
8454 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8455 int just_select)
8456 {
8457 enum rtx_code code = GET_CODE (x);
8458 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8459 machine_mode op_mode;
8460 unsigned HOST_WIDE_INT fuller_mask, nonzero;
8461 rtx op0, op1, temp;
8462
8463 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8464 code below will do the wrong thing since the mode of such an
8465 expression is VOIDmode.
8466
8467 Also do nothing if X is a CLOBBER; this can happen if X was
8468 the return value from a call to gen_lowpart. */
8469 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8470 return x;
8471
8472 /* We want to perform the operation in its present mode unless we know
8473 that the operation is valid in MODE, in which case we do the operation
8474 in MODE. */
8475 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8476 && have_insn_for (code, mode))
8477 ? mode : GET_MODE (x));
8478
8479 /* It is not valid to do a right-shift in a narrower mode
8480 than the one it came in with. */
8481 if ((code == LSHIFTRT || code == ASHIFTRT)
8482 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8483 op_mode = GET_MODE (x);
8484
8485 /* Truncate MASK to fit OP_MODE. */
8486 if (op_mode)
8487 mask &= GET_MODE_MASK (op_mode);
8488
8489 /* When we have an arithmetic operation, or a shift whose count we
8490 do not know, we need to assume that all bits up to the highest-order
8491 bit in MASK will be needed. This is how we form such a mask. */
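  /* For example (illustrative): MASK == 0x30 gives FULLER_MASK == 0x3f,
     because a carry out of bits 0 through 3 of the operands can change
     bits 4 and 5 of an arithmetic result.  */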
8492 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8493 fuller_mask = HOST_WIDE_INT_M1U;
8494 else
8495 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8496 - 1);
8497
8498 /* Determine what bits of X are guaranteed to be (non)zero. */
8499 nonzero = nonzero_bits (x, mode);
8500
8501 /* If none of the bits in X are needed, return a zero. */
8502 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8503 x = const0_rtx;
8504
8505 /* If X is a CONST_INT, return a new one. Do this here since the
8506 test below will fail. */
8507 if (CONST_INT_P (x))
8508 {
8509 if (SCALAR_INT_MODE_P (mode))
8510 return gen_int_mode (INTVAL (x) & mask, mode);
8511 else
8512 {
8513 x = GEN_INT (INTVAL (x) & mask);
8514 return gen_lowpart_common (mode, x);
8515 }
8516 }
8517
8518 /* If X is narrower than MODE and we want all the bits in X's mode, just
8519 get X in the proper mode. */
8520 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8521 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8522 return gen_lowpart (mode, x);
8523
8524 /* We can ignore the effect of a SUBREG if it narrows the mode or
8525 if the constant masks to zero all the bits the mode doesn't have. */
8526 if (GET_CODE (x) == SUBREG
8527 && subreg_lowpart_p (x)
8528 && ((GET_MODE_SIZE (GET_MODE (x))
8529 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8530 || (0 == (mask
8531 & GET_MODE_MASK (GET_MODE (x))
8532 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8533 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8534
8535 /* The arithmetic simplifications here only work for scalar integer modes. */
8536 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8537 return gen_lowpart_or_truncate (mode, x);
8538
8539 switch (code)
8540 {
8541 case CLOBBER:
8542 /* If X is a (clobber (const_int)), return it since we know we are
8543 generating something that won't match. */
8544 return x;
8545
8546 case SIGN_EXTEND:
8547 case ZERO_EXTEND:
8548 case ZERO_EXTRACT:
8549 case SIGN_EXTRACT:
8550 x = expand_compound_operation (x);
8551 if (GET_CODE (x) != code)
8552 return force_to_mode (x, mode, mask, next_select);
8553 break;
8554
8555 case TRUNCATE:
8556 /* Similarly for a truncate. */
8557 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8558
8559 case AND:
8560 /* If this is an AND with a constant, convert it into an AND
8561 whose constant is the AND of that constant with MASK. If it
8562 remains an AND of MASK, delete it since it is redundant. */
8563
8564 if (CONST_INT_P (XEXP (x, 1)))
8565 {
8566 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8567 mask & INTVAL (XEXP (x, 1)));
8568
8569 /* If X is still an AND, see if it is an AND with a mask that
8570 is just some low-order bits. If so, and it is MASK, we don't
8571 need it. */
8572
8573 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8574 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8575 == mask))
8576 x = XEXP (x, 0);
8577
8578 /* If it remains an AND, try making another AND with the bits
8579 in the mode mask that aren't in MASK turned on. If the
8580 constant in the AND is wide enough, this might make a
8581 cheaper constant. */
8582
8583 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8584 && GET_MODE_MASK (GET_MODE (x)) != mask
8585 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8586 {
8587 unsigned HOST_WIDE_INT cval
8588 = UINTVAL (XEXP (x, 1))
8589 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8590 rtx y;
8591
8592 y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
8593 gen_int_mode (cval, GET_MODE (x)));
8594 if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
8595 < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
8596 x = y;
8597 }
8598
8599 break;
8600 }
8601
8602 goto binop;
8603
8604 case PLUS:
8605 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8606 low-order bits (as in an alignment operation) and FOO is already
8607 aligned to that boundary, mask C1 to that boundary as well.
8608 This may eliminate that PLUS and, later, the AND. */
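      /* For instance (assumed values): if X is (plus FOO (const_int 4)),
	 MASK is -8 (an alignment mask) and the low three bits of FOO are
	 known to be zero, the 4 is masked away entirely and the PLUS
	 disappears, leaving just FOO.  */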
8609
8610 {
8611 unsigned int width = GET_MODE_PRECISION (mode);
8612 unsigned HOST_WIDE_INT smask = mask;
8613
8614 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8615 number, sign extend it. */
8616
8617 if (width < HOST_BITS_PER_WIDE_INT
8618 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8619 smask |= HOST_WIDE_INT_M1U << width;
8620
8621 if (CONST_INT_P (XEXP (x, 1))
8622 && pow2p_hwi (- smask)
8623 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8624 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8625 return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8626 (INTVAL (XEXP (x, 1)) & smask)),
8627 mode, smask, next_select);
8628 }
8629
8630 /* fall through */
8631
8632 case MULT:
8633 /* Substituting into the operands of a widening MULT is not likely to
8634 create RTL matching a machine insn. */
8635 if (code == MULT
8636 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8637 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8638 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8639 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8640 && REG_P (XEXP (XEXP (x, 0), 0))
8641 && REG_P (XEXP (XEXP (x, 1), 0)))
8642 return gen_lowpart_or_truncate (mode, x);
8643
8644 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8645 most significant bit in MASK since carries from those bits will
8646 affect the bits we are interested in. */
8647 mask = fuller_mask;
8648 goto binop;
8649
8650 case MINUS:
8651 /* If X is (minus C Y) where C's least set bit is larger than any bit
8652 in the mask, then we may replace with (neg Y). */
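      /* E.g. (illustrative): with MASK == 0xff, (minus (const_int 256) Y)
	 has the same low eight bits as (neg Y), so the simpler NEG form
	 can be used instead.  */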
8653 if (CONST_INT_P (XEXP (x, 0))
8654 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8655 {
8656 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8657 GET_MODE (x));
8658 return force_to_mode (x, mode, mask, next_select);
8659 }
8660
8661 /* Similarly, if C contains every bit in the fuller_mask, then we may
8662 replace with (not Y). */
8663 if (CONST_INT_P (XEXP (x, 0))
8664 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8665 {
8666 x = simplify_gen_unary (NOT, GET_MODE (x),
8667 XEXP (x, 1), GET_MODE (x));
8668 return force_to_mode (x, mode, mask, next_select);
8669 }
8670
8671 mask = fuller_mask;
8672 goto binop;
8673
8674 case IOR:
8675 case XOR:
8676 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8677 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8678 operation which may be a bitfield extraction. Ensure that the
8679 constant we form is not wider than the mode of X. */
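      /* Schematically (an illustration, ignoring the precise mask and
	 nonzero-bits preconditions): (ior (lshiftrt FOO (const_int 8))
	 (const_int 1)) becomes (lshiftrt (ior FOO (const_int 256))
	 (const_int 8)); the constant is moved up past the shift so the
	 whole thing again looks like a field extraction.  */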
8680
8681 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8682 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8683 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8684 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8685 && CONST_INT_P (XEXP (x, 1))
8686 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8687 + floor_log2 (INTVAL (XEXP (x, 1))))
8688 < GET_MODE_PRECISION (GET_MODE (x)))
8689 && (UINTVAL (XEXP (x, 1))
8690 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8691 {
8692 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8693 << INTVAL (XEXP (XEXP (x, 0), 1)),
8694 GET_MODE (x));
8695 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8696 XEXP (XEXP (x, 0), 0), temp);
8697 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8698 XEXP (XEXP (x, 0), 1));
8699 return force_to_mode (x, mode, mask, next_select);
8700 }
8701
8702 binop:
8703 /* For most binary operations, just propagate into the operation and
8704 change the mode if we have an operation of that mode. */
8705
8706 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8707 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8708
8709 /* If we ended up truncating both operands, truncate the result of the
8710 operation instead. */
8711 if (GET_CODE (op0) == TRUNCATE
8712 && GET_CODE (op1) == TRUNCATE)
8713 {
8714 op0 = XEXP (op0, 0);
8715 op1 = XEXP (op1, 0);
8716 }
8717
8718 op0 = gen_lowpart_or_truncate (op_mode, op0);
8719 op1 = gen_lowpart_or_truncate (op_mode, op1);
8720
8721 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8722 x = simplify_gen_binary (code, op_mode, op0, op1);
8723 break;
8724
8725 case ASHIFT:
8726 /* For left shifts, do the same, but just for the first operand.
8727 However, we cannot do anything with shifts where we cannot
8728 guarantee that the counts are smaller than the size of the mode
8729 because such a count will have a different meaning in a
8730 wider mode. */
8731
8732 if (! (CONST_INT_P (XEXP (x, 1))
8733 && INTVAL (XEXP (x, 1)) >= 0
8734 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8735 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8736 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8737 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8738 break;
8739
8740 /* If the shift count is a constant and we can do arithmetic in
8741 the mode of the shift, refine which bits we need. Otherwise, use the
8742 conservative form of the mask. */
8743 if (CONST_INT_P (XEXP (x, 1))
8744 && INTVAL (XEXP (x, 1)) >= 0
8745 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8746 && HWI_COMPUTABLE_MODE_P (op_mode))
8747 mask >>= INTVAL (XEXP (x, 1));
8748 else
8749 mask = fuller_mask;
8750
8751 op0 = gen_lowpart_or_truncate (op_mode,
8752 force_to_mode (XEXP (x, 0), op_mode,
8753 mask, next_select));
8754
8755 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8756 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8757 break;
8758
8759 case LSHIFTRT:
8760 /* Here we can only do something if the shift count is a constant,
8761 this shift constant is valid for the host, and we can do arithmetic
8762 in OP_MODE. */
8763
8764 if (CONST_INT_P (XEXP (x, 1))
8765 && INTVAL (XEXP (x, 1)) >= 0
8766 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8767 && HWI_COMPUTABLE_MODE_P (op_mode))
8768 {
8769 rtx inner = XEXP (x, 0);
8770 unsigned HOST_WIDE_INT inner_mask;
8771
8772 /* Select the mask of the bits we need for the shift operand. */
8773 inner_mask = mask << INTVAL (XEXP (x, 1));
8774
8775 /* We can only change the mode of the shift if we can do arithmetic
8776 in the mode of the shift and INNER_MASK is no wider than the
8777 width of X's mode. */
8778 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8779 op_mode = GET_MODE (x);
8780
8781 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8782
8783 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8784 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8785 }
8786
8787 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8788 shift and AND produces only copies of the sign bit (C2 is one less
8789 than a power of two), we can do this with just a shift. */
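      /* A concrete case (assumed values, SImode): if FOO is a sign-extended
	 byte, so it has at least 24 sign-bit copies, then
	 (and (lshiftrt FOO (const_int 8)) (const_int 7)) only ever sees
	 copies of the sign bit and can be rewritten as
	 (lshiftrt FOO (const_int 29)), which moves three of those copies
	 into the low bits directly.  */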
8790
8791 if (GET_CODE (x) == LSHIFTRT
8792 && CONST_INT_P (XEXP (x, 1))
8793 /* The shift puts one of the sign bit copies in the least significant
8794 bit. */
8795 && ((INTVAL (XEXP (x, 1))
8796 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8797 >= GET_MODE_PRECISION (GET_MODE (x)))
8798 && pow2p_hwi (mask + 1)
8799 	 /* Number of bits left after the shift must be at least as many
8800 	    as the mask needs.  */
8801 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8802 <= GET_MODE_PRECISION (GET_MODE (x)))
8803 	 /* Must be at least as many sign bit copies as the mask needs.  */
8804 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8805 >= exact_log2 (mask + 1)))
8806 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8807 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8808 - exact_log2 (mask + 1)));
8809
8810 goto shiftrt;
8811
8812 case ASHIFTRT:
8813 /* If we are just looking for the sign bit, we don't need this shift at
8814 all, even if it has a variable count. */
8815 if (val_signbit_p (GET_MODE (x), mask))
8816 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8817
8818 /* If this is a shift by a constant, get a mask that contains those bits
8819 that are not copies of the sign bit. We then have two cases: If
8820 MASK only includes those bits, this can be a logical shift, which may
8821 allow simplifications. If MASK is a single-bit field not within
8822 those bits, we are requesting a copy of the sign bit and hence can
8823 shift the sign bit to the appropriate location. */
8824
8825 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8826 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8827 {
8828 int i;
8829
8830 /* If the considered data is wider than HOST_WIDE_INT, we can't
8831 represent a mask for all its bits in a single scalar.
8832 But we only care about the lower bits, so calculate these. */
8833
8834 if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8835 {
8836 nonzero = HOST_WIDE_INT_M1U;
8837
8838 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8839 is the number of bits a full-width mask would have set.
8840 We need only shift if these are fewer than nonzero can
8841 hold. If not, we must keep all bits set in nonzero. */
8842
8843 if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8844 < HOST_BITS_PER_WIDE_INT)
8845 nonzero >>= INTVAL (XEXP (x, 1))
8846 + HOST_BITS_PER_WIDE_INT
8847 - GET_MODE_PRECISION (GET_MODE (x)) ;
8848 }
8849 else
8850 {
8851 nonzero = GET_MODE_MASK (GET_MODE (x));
8852 nonzero >>= INTVAL (XEXP (x, 1));
8853 }
8854
8855 if ((mask & ~nonzero) == 0)
8856 {
8857 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8858 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8859 if (GET_CODE (x) != ASHIFTRT)
8860 return force_to_mode (x, mode, mask, next_select);
8861 }
8862
8863 else if ((i = exact_log2 (mask)) >= 0)
8864 {
8865 x = simplify_shift_const
8866 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8867 GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8868
8869 if (GET_CODE (x) != ASHIFTRT)
8870 return force_to_mode (x, mode, mask, next_select);
8871 }
8872 }
8873
8874 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8875 even if the shift count isn't a constant. */
8876 if (mask == 1)
8877 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8878 XEXP (x, 0), XEXP (x, 1));
8879
8880 shiftrt:
8881
8882 /* If this is a zero- or sign-extension operation that just affects bits
8883 we don't care about, remove it. Be sure the call above returned
8884 something that is still a shift. */
8885
8886 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8887 && CONST_INT_P (XEXP (x, 1))
8888 && INTVAL (XEXP (x, 1)) >= 0
8889 && (INTVAL (XEXP (x, 1))
8890 <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8891 && GET_CODE (XEXP (x, 0)) == ASHIFT
8892 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8893 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8894 next_select);
8895
8896 break;
8897
8898 case ROTATE:
8899 case ROTATERT:
8900 /* If the shift count is constant and we can do computations
8901 in the mode of X, compute where the bits we care about are.
8902 Otherwise, we can't do anything. Don't change the mode of
8903 the shift or propagate MODE into the shift, though. */
8904 if (CONST_INT_P (XEXP (x, 1))
8905 && INTVAL (XEXP (x, 1)) >= 0)
8906 {
8907 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8908 GET_MODE (x),
8909 gen_int_mode (mask, GET_MODE (x)),
8910 XEXP (x, 1));
8911 if (temp && CONST_INT_P (temp))
8912 x = simplify_gen_binary (code, GET_MODE (x),
8913 force_to_mode (XEXP (x, 0), GET_MODE (x),
8914 INTVAL (temp), next_select),
8915 XEXP (x, 1));
8916 }
8917 break;
8918
8919 case NEG:
8920 /* If we just want the low-order bit, the NEG isn't needed since it
8921 won't change the low-order bit. */
8922 if (mask == 1)
8923 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8924
8925 /* We need any bits less significant than the most significant bit in
8926 MASK since carries from those bits will affect the bits we are
8927 interested in. */
8928 mask = fuller_mask;
8929 goto unop;
8930
8931 case NOT:
8932 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8933 same as the XOR case above. Ensure that the constant we form is not
8934 wider than the mode of X. */
8935
8936 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8937 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8938 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8939 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8940 < GET_MODE_PRECISION (GET_MODE (x)))
8941 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8942 {
8943 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8944 GET_MODE (x));
8945 temp = simplify_gen_binary (XOR, GET_MODE (x),
8946 XEXP (XEXP (x, 0), 0), temp);
8947 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8948 temp, XEXP (XEXP (x, 0), 1));
8949
8950 return force_to_mode (x, mode, mask, next_select);
8951 }
8952
8953 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8954 use the full mask inside the NOT. */
8955 mask = fuller_mask;
8956
8957 unop:
8958 op0 = gen_lowpart_or_truncate (op_mode,
8959 force_to_mode (XEXP (x, 0), mode, mask,
8960 next_select));
8961 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8962 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8963 break;
8964
8965 case NE:
8966 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8967 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8968 which is equal to STORE_FLAG_VALUE. */
8969 if ((mask & ~STORE_FLAG_VALUE) == 0
8970 && XEXP (x, 1) == const0_rtx
8971 && GET_MODE (XEXP (x, 0)) == mode
8972 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
8973 && (nonzero_bits (XEXP (x, 0), mode)
8974 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8975 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8976
8977 break;
8978
8979 case IF_THEN_ELSE:
8980 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8981 written in a narrower mode. We play it safe and do not do so. */
8982
8983 op0 = gen_lowpart_or_truncate (GET_MODE (x),
8984 force_to_mode (XEXP (x, 1), mode,
8985 mask, next_select));
8986 op1 = gen_lowpart_or_truncate (GET_MODE (x),
8987 force_to_mode (XEXP (x, 2), mode,
8988 mask, next_select));
8989 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
8990 x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
8991 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
8992 op0, op1);
8993 break;
8994
8995 default:
8996 break;
8997 }
8998
8999 /* Ensure we return a value of the proper mode. */
9000 return gen_lowpart_or_truncate (mode, x);
9001 }
9002 \f
9003 /* Return nonzero if X is an expression that has one of two values depending on
9004 whether some other value is zero or nonzero. In that case, we return the
9005 value that is being tested, *PTRUE is set to the value if the rtx being
9006 returned has a nonzero value, and *PFALSE is set to the other alternative.
9007
9008 If we return zero, we set *PTRUE and *PFALSE to X. */
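
/* For example (illustrative): for X == (ne (reg A) (const_int 0)) we return
   (reg A) with *PTRUE == const_true_rtx and *PFALSE == const0_rtx; for
   X == (if_then_else (eq (reg C) (const_int 0)) Y Z) we return (reg C)
   with *PTRUE == Z and *PFALSE == Y.  */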
9009
9010 static rtx
9011 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9012 {
9013 machine_mode mode = GET_MODE (x);
9014 enum rtx_code code = GET_CODE (x);
9015 rtx cond0, cond1, true0, true1, false0, false1;
9016 unsigned HOST_WIDE_INT nz;
9017
9018 /* If we are comparing a value against zero, we are done. */
9019 if ((code == NE || code == EQ)
9020 && XEXP (x, 1) == const0_rtx)
9021 {
9022 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9023 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9024 return XEXP (x, 0);
9025 }
9026
9027 /* If this is a unary operation whose operand has one of two values, apply
9028 our opcode to compute those values. */
9029 else if (UNARY_P (x)
9030 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9031 {
9032 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9033 *pfalse = simplify_gen_unary (code, mode, false0,
9034 GET_MODE (XEXP (x, 0)));
9035 return cond0;
9036 }
9037
9038 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9039 make can't possibly match and would suppress other optimizations. */
9040 else if (code == COMPARE)
9041 ;
9042
9043 /* If this is a binary operation, see if either side has only one of two
9044 values. If either one does or if both do and they are conditional on
9045 the same value, compute the new true and false values. */
9046 else if (BINARY_P (x))
9047 {
9048 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
9049 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
9050
9051 if ((cond0 != 0 || cond1 != 0)
9052 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
9053 {
9054 /* If if_then_else_cond returned zero, then true/false are the
9055 same rtl. We must copy one of them to prevent invalid rtl
9056 sharing. */
9057 if (cond0 == 0)
9058 true0 = copy_rtx (true0);
9059 else if (cond1 == 0)
9060 true1 = copy_rtx (true1);
9061
9062 if (COMPARISON_P (x))
9063 {
9064 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9065 true0, true1);
9066 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9067 false0, false1);
9068 }
9069 else
9070 {
9071 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9072 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9073 }
9074
9075 return cond0 ? cond0 : cond1;
9076 }
9077
9078 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9079 operands is zero when the other is nonzero, and vice-versa,
9080 and STORE_FLAG_VALUE is 1 or -1. */
9081
9082 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9083 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9084 || code == UMAX)
9085 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9086 {
9087 rtx op0 = XEXP (XEXP (x, 0), 1);
9088 rtx op1 = XEXP (XEXP (x, 1), 1);
9089
9090 cond0 = XEXP (XEXP (x, 0), 0);
9091 cond1 = XEXP (XEXP (x, 1), 0);
9092
9093 if (COMPARISON_P (cond0)
9094 && COMPARISON_P (cond1)
9095 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9096 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9097 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9098 || ((swap_condition (GET_CODE (cond0))
9099 == reversed_comparison_code (cond1, NULL))
9100 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9101 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9102 && ! side_effects_p (x))
9103 {
9104 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9105 *pfalse = simplify_gen_binary (MULT, mode,
9106 (code == MINUS
9107 ? simplify_gen_unary (NEG, mode,
9108 op1, mode)
9109 : op1),
9110 const_true_rtx);
9111 return cond0;
9112 }
9113 }
9114
9115 /* Similarly for MULT, AND and UMIN, except that for these the result
9116 is always zero. */
9117 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9118 && (code == MULT || code == AND || code == UMIN)
9119 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9120 {
9121 cond0 = XEXP (XEXP (x, 0), 0);
9122 cond1 = XEXP (XEXP (x, 1), 0);
9123
9124 if (COMPARISON_P (cond0)
9125 && COMPARISON_P (cond1)
9126 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9127 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9128 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9129 || ((swap_condition (GET_CODE (cond0))
9130 == reversed_comparison_code (cond1, NULL))
9131 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9132 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9133 && ! side_effects_p (x))
9134 {
9135 *ptrue = *pfalse = const0_rtx;
9136 return cond0;
9137 }
9138 }
9139 }
9140
9141 else if (code == IF_THEN_ELSE)
9142 {
9143 /* If we have IF_THEN_ELSE already, extract the condition and
9144 canonicalize it if it is NE or EQ. */
9145 cond0 = XEXP (x, 0);
9146 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9147 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9148 return XEXP (cond0, 0);
9149 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9150 {
9151 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9152 return XEXP (cond0, 0);
9153 }
9154 else
9155 return cond0;
9156 }
9157
9158 /* If X is a SUBREG, we can narrow both the true and false values
9159 of the inner expression, if there is a condition. */
9160 else if (code == SUBREG
9161 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9162 &true0, &false0)))
9163 {
9164 true0 = simplify_gen_subreg (mode, true0,
9165 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9166 false0 = simplify_gen_subreg (mode, false0,
9167 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9168 if (true0 && false0)
9169 {
9170 *ptrue = true0;
9171 *pfalse = false0;
9172 return cond0;
9173 }
9174 }
9175
9176 /* If X is a constant, this isn't special and will cause confusion
9177 if we treat it as such. Likewise if it is equivalent to a constant. */
9178 else if (CONSTANT_P (x)
9179 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9180 ;
9181
9182 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9183 will be least confusing to the rest of the compiler. */
9184 else if (mode == BImode)
9185 {
9186 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9187 return x;
9188 }
9189
9190 /* If X is known to be either 0 or -1, those are the true and
9191 false values when testing X. */
9192 else if (x == constm1_rtx || x == const0_rtx
9193 || (mode != VOIDmode && mode != BLKmode
9194 && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
9195 {
9196 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9197 return x;
9198 }
9199
9200 /* Likewise for 0 or a single bit. */
9201 else if (HWI_COMPUTABLE_MODE_P (mode)
9202 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9203 {
9204 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9205 return x;
9206 }
9207
9208 /* Otherwise fail; show no condition with true and false values the same. */
9209 *ptrue = *pfalse = x;
9210 return 0;
9211 }
9212 \f
9213 /* Return the value of expression X given the fact that condition COND
9214 is known to be true when applied to REG as its first operand and VAL
9215 as its second. X is known to not be shared and so can be modified in
9216 place.
9217
9218 We only handle the simplest cases, and specifically those cases that
9219 arise with IF_THEN_ELSE expressions. */
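
/* For instance (illustrative): if COND is GE, REG is R and VAL is
   (const_int 0), then X == (abs R) simplifies to just R; with COND == LT
   it would instead become (neg R).  */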
9220
9221 static rtx
9222 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9223 {
9224 enum rtx_code code = GET_CODE (x);
9225 const char *fmt;
9226 int i, j;
9227
9228 if (side_effects_p (x))
9229 return x;
9230
9231 /* If either operand of the condition is a floating point value,
9232 then we have to avoid collapsing an EQ comparison. */
9233 if (cond == EQ
9234 && rtx_equal_p (x, reg)
9235 && ! FLOAT_MODE_P (GET_MODE (x))
9236 && ! FLOAT_MODE_P (GET_MODE (val)))
9237 return val;
9238
9239 if (cond == UNEQ && rtx_equal_p (x, reg))
9240 return val;
9241
9242 /* If X is (abs REG) and we know something about REG's relationship
9243 with zero, we may be able to simplify this. */
9244
9245 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9246 switch (cond)
9247 {
9248 case GE: case GT: case EQ:
9249 return XEXP (x, 0);
9250 case LT: case LE:
9251 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9252 XEXP (x, 0),
9253 GET_MODE (XEXP (x, 0)));
9254 default:
9255 break;
9256 }
9257
9258 /* The only other cases we handle are MIN, MAX, and comparisons if the
9259 operands are the same as REG and VAL. */
9260
9261 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9262 {
9263 if (rtx_equal_p (XEXP (x, 0), val))
9264 {
9265 std::swap (val, reg);
9266 cond = swap_condition (cond);
9267 }
9268
9269 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9270 {
9271 if (COMPARISON_P (x))
9272 {
9273 if (comparison_dominates_p (cond, code))
9274 return const_true_rtx;
9275
9276 code = reversed_comparison_code (x, NULL);
9277 if (code != UNKNOWN
9278 && comparison_dominates_p (cond, code))
9279 return const0_rtx;
9280 else
9281 return x;
9282 }
9283 else if (code == SMAX || code == SMIN
9284 || code == UMIN || code == UMAX)
9285 {
9286 int unsignedp = (code == UMIN || code == UMAX);
9287
9288 /* Do not reverse the condition when it is NE or EQ.
9289 This is because we cannot conclude anything about
9290 the value of 'SMAX (x, y)' when x is not equal to y,
9291 but we can when x equals y. */
9292 if ((code == SMAX || code == UMAX)
9293 && ! (cond == EQ || cond == NE))
9294 cond = reverse_condition (cond);
9295
9296 switch (cond)
9297 {
9298 case GE: case GT:
9299 return unsignedp ? x : XEXP (x, 1);
9300 case LE: case LT:
9301 return unsignedp ? x : XEXP (x, 0);
9302 case GEU: case GTU:
9303 return unsignedp ? XEXP (x, 1) : x;
9304 case LEU: case LTU:
9305 return unsignedp ? XEXP (x, 0) : x;
9306 default:
9307 break;
9308 }
9309 }
9310 }
9311 }
9312 else if (code == SUBREG)
9313 {
9314 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9315 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9316
9317 if (SUBREG_REG (x) != r)
9318 {
9319 /* We must simplify subreg here, before we lose track of the
9320 original inner_mode. */
9321 new_rtx = simplify_subreg (GET_MODE (x), r,
9322 inner_mode, SUBREG_BYTE (x));
9323 if (new_rtx)
9324 return new_rtx;
9325 else
9326 SUBST (SUBREG_REG (x), r);
9327 }
9328
9329 return x;
9330 }
9331 /* We don't have to handle SIGN_EXTEND here, because even in the
9332 case of replacing something with a modeless CONST_INT, a
9333 CONST_INT is already (supposed to be) a valid sign extension for
9334 its narrower mode, which implies it's already properly
9335 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9336 story is different. */
9337 else if (code == ZERO_EXTEND)
9338 {
9339 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9340 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9341
9342 if (XEXP (x, 0) != r)
9343 {
9344 /* We must simplify the zero_extend here, before we lose
9345 track of the original inner_mode. */
9346 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9347 r, inner_mode);
9348 if (new_rtx)
9349 return new_rtx;
9350 else
9351 SUBST (XEXP (x, 0), r);
9352 }
9353
9354 return x;
9355 }
9356
9357 fmt = GET_RTX_FORMAT (code);
9358 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9359 {
9360 if (fmt[i] == 'e')
9361 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9362 else if (fmt[i] == 'E')
9363 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9364 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9365 cond, reg, val));
9366 }
9367
9368 return x;
9369 }
9370 \f
9371 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9372 assignment as a field assignment. */
9373
9374 static int
9375 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9376 {
9377 if (widen_x && GET_MODE (x) != GET_MODE (y))
9378 {
9379 if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
9380 return 0;
9381 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9382 return 0;
9383 /* For big endian, adjust the memory offset. */
9384 if (BYTES_BIG_ENDIAN)
9385 x = adjust_address_nv (x, GET_MODE (y),
9386 -subreg_lowpart_offset (GET_MODE (x),
9387 GET_MODE (y)));
9388 else
9389 x = adjust_address_nv (x, GET_MODE (y), 0);
9390 }
9391
9392 if (x == y || rtx_equal_p (x, y))
9393 return 1;
9394
9395 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9396 return 0;
9397
9398 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9399 Note that all SUBREGs of MEM are paradoxical; otherwise they
9400 would have been rewritten. */
9401 if (MEM_P (x) && GET_CODE (y) == SUBREG
9402 && MEM_P (SUBREG_REG (y))
9403 && rtx_equal_p (SUBREG_REG (y),
9404 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9405 return 1;
9406
9407 if (MEM_P (y) && GET_CODE (x) == SUBREG
9408 && MEM_P (SUBREG_REG (x))
9409 && rtx_equal_p (SUBREG_REG (x),
9410 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9411 return 1;
9412
9413 /* We used to see if get_last_value of X and Y were the same but that's
9414 not correct. In one direction, we'll cause the assignment to have
9415 the wrong destination and in the other case, we'll import a register into
9416 this insn that might already have been dead. So fail if none of the
9417 above cases are true. */
9418 return 0;
9419 }
9420 \f
9421 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9422 Return that assignment if so.
9423
9424 We only handle the most common cases. */
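
/* Two of the shapes recognized, as an illustration: a SET whose source is
   (ior (ashift (const_int 1) POS) DEST) becomes a store of (const_int 1)
   into the one-bit field of DEST at POS, and one whose source is
   (and (rotate (const_int -2) POS) DEST) becomes a store of (const_int 0)
   into that same one-bit field.  */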
9425
9426 static rtx
9427 make_field_assignment (rtx x)
9428 {
9429 rtx dest = SET_DEST (x);
9430 rtx src = SET_SRC (x);
9431 rtx assign;
9432 rtx rhs, lhs;
9433 HOST_WIDE_INT c1;
9434 HOST_WIDE_INT pos;
9435 unsigned HOST_WIDE_INT len;
9436 rtx other;
9437 machine_mode mode;
9438
9439 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9440 a clear of a one-bit field. We will have changed it to
9441 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9442 for a SUBREG. */
9443
9444 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9445 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9446 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9447 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9448 {
9449 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9450 1, 1, 1, 0);
9451 if (assign != 0)
9452 return gen_rtx_SET (assign, const0_rtx);
9453 return x;
9454 }
9455
9456 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9457 && subreg_lowpart_p (XEXP (src, 0))
9458 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9459 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9460 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9461 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9462 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9463 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9464 {
9465 assign = make_extraction (VOIDmode, dest, 0,
9466 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9467 1, 1, 1, 0);
9468 if (assign != 0)
9469 return gen_rtx_SET (assign, const0_rtx);
9470 return x;
9471 }
9472
9473 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9474 one-bit field. */
9475 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9476 && XEXP (XEXP (src, 0), 0) == const1_rtx
9477 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9478 {
9479 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9480 1, 1, 1, 0);
9481 if (assign != 0)
9482 return gen_rtx_SET (assign, const1_rtx);
9483 return x;
9484 }
9485
9486 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9487 SRC is an AND with all bits of that field set, then we can discard
9488 the AND. */
9489 if (GET_CODE (dest) == ZERO_EXTRACT
9490 && CONST_INT_P (XEXP (dest, 1))
9491 && GET_CODE (src) == AND
9492 && CONST_INT_P (XEXP (src, 1)))
9493 {
9494 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9495 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9496 unsigned HOST_WIDE_INT ze_mask;
9497
9498 if (width >= HOST_BITS_PER_WIDE_INT)
9499 ze_mask = -1;
9500 else
9501 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9502
9503 /* Complete overlap. We can remove the source AND. */
9504 if ((and_mask & ze_mask) == ze_mask)
9505 return gen_rtx_SET (dest, XEXP (src, 0));
9506
9507 /* Partial overlap. We can reduce the source AND. */
9508 if ((and_mask & ze_mask) != and_mask)
9509 {
9510 mode = GET_MODE (src);
9511 src = gen_rtx_AND (mode, XEXP (src, 0),
9512 gen_int_mode (and_mask & ze_mask, mode));
9513 return gen_rtx_SET (dest, src);
9514 }
9515 }
9516
9517 /* The other case we handle is assignments into a constant-position
9518 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9519 a mask that has all one bits except for a group of zero bits and
9520 OTHER is known to have zeros where C1 has ones, this is such an
9521 assignment. Compute the position and length from C1. Shift OTHER
9522 to the appropriate position, force it to the required mode, and
9523 make the extraction. Check for the AND in both operands. */
9524
9525 /* One or more SUBREGs might obscure the constant-position field
9526 assignment. The first one we are likely to encounter is an outer
9527 narrowing SUBREG, which we can just strip for the purposes of
9528 identifying the constant-field assignment. */
9529 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
9530 src = SUBREG_REG (src);
9531
9532 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9533 return x;
9534
9535 rhs = expand_compound_operation (XEXP (src, 0));
9536 lhs = expand_compound_operation (XEXP (src, 1));
9537
9538 if (GET_CODE (rhs) == AND
9539 && CONST_INT_P (XEXP (rhs, 1))
9540 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9541 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9542 /* The second SUBREG that might get in the way is a paradoxical
9543 SUBREG around the first operand of the AND. We want to
9544 pretend the operand is as wide as the destination here. We
9545 do this by adjusting the MEM to wider mode for the sole
9546 purpose of the call to rtx_equal_for_field_assignment_p. Also
9547 note this trick only works for MEMs. */
9548 else if (GET_CODE (rhs) == AND
9549 && paradoxical_subreg_p (XEXP (rhs, 0))
9550 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9551 && CONST_INT_P (XEXP (rhs, 1))
9552 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9553 dest, true))
9554 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9555 else if (GET_CODE (lhs) == AND
9556 && CONST_INT_P (XEXP (lhs, 1))
9557 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9558 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9559 /* The second SUBREG that might get in the way is a paradoxical
9560 SUBREG around the first operand of the AND. We want to
9561 pretend the operand is as wide as the destination here. We
9562 do this by adjusting the MEM to wider mode for the sole
9563 purpose of the call to rtx_equal_for_field_assignment_p. Also
9564 note this trick only works for MEMs. */
9565 else if (GET_CODE (lhs) == AND
9566 && paradoxical_subreg_p (XEXP (lhs, 0))
9567 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9568 && CONST_INT_P (XEXP (lhs, 1))
9569 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9570 dest, true))
9571 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9572 else
9573 return x;
9574
9575 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9576 if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9577 || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9578 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9579 return x;
9580
9581 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9582 if (assign == 0)
9583 return x;
9584
9585 /* The mode to use for the source is the mode of the assignment, or of
9586 what is inside a possible STRICT_LOW_PART. */
9587 mode = (GET_CODE (assign) == STRICT_LOW_PART
9588 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9589
9590 /* Shift OTHER right POS places and make it the source, restricting it
9591 to the proper length and mode. */
9592
9593 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9594 GET_MODE (src),
9595 other, pos),
9596 dest);
9597 src = force_to_mode (src, mode,
9598 GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9599 ? HOST_WIDE_INT_M1U
9600 : (HOST_WIDE_INT_1U << len) - 1,
9601 0);
9602
9603 /* If SRC is masked by an AND that does not make a difference in
9604 the value being stored, strip it. */
9605 if (GET_CODE (assign) == ZERO_EXTRACT
9606 && CONST_INT_P (XEXP (assign, 1))
9607 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9608 && GET_CODE (src) == AND
9609 && CONST_INT_P (XEXP (src, 1))
9610 && UINTVAL (XEXP (src, 1))
9611 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9612 src = XEXP (src, 0);
9613
9614 return gen_rtx_SET (assign, src);
9615 }
9616 \f
9617 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9618 if so. */
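
/* For example, (ior (and A C) (and B C)) is rewritten as
   (and (ior A B) C), and (plus (mult A C) (mult B C)) as
   (mult (plus A B) C).  */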
9619
9620 static rtx
9621 apply_distributive_law (rtx x)
9622 {
9623 enum rtx_code code = GET_CODE (x);
9624 enum rtx_code inner_code;
9625 rtx lhs, rhs, other;
9626 rtx tem;
9627
9628 /* Distributivity is not true for floating point as it can change the
9629 value. So we don't do it unless -funsafe-math-optimizations. */
9630 if (FLOAT_MODE_P (GET_MODE (x))
9631 && ! flag_unsafe_math_optimizations)
9632 return x;
9633
9634 /* The outer operation can only be one of the following: */
9635 if (code != IOR && code != AND && code != XOR
9636 && code != PLUS && code != MINUS)
9637 return x;
9638
9639 lhs = XEXP (x, 0);
9640 rhs = XEXP (x, 1);
9641
9642 /* If either operand is a primitive we can't do anything, so get out
9643 fast. */
9644 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9645 return x;
9646
9647 lhs = expand_compound_operation (lhs);
9648 rhs = expand_compound_operation (rhs);
9649 inner_code = GET_CODE (lhs);
9650 if (inner_code != GET_CODE (rhs))
9651 return x;
9652
9653 /* See if the inner and outer operations distribute. */
9654 switch (inner_code)
9655 {
9656 case LSHIFTRT:
9657 case ASHIFTRT:
9658 case AND:
9659 case IOR:
9660 /* These all distribute except over PLUS. */
9661 if (code == PLUS || code == MINUS)
9662 return x;
9663 break;
9664
9665 case MULT:
9666 if (code != PLUS && code != MINUS)
9667 return x;
9668 break;
9669
9670 case ASHIFT:
9671 /* This is also a multiply, so it distributes over everything. */
9672 break;
9673
9674 /* This used to handle SUBREG, but this turned out to be counter-
9675 productive, since (subreg (op ...)) usually is not handled by
9676 insn patterns, and this "optimization" therefore transformed
9677 recognizable patterns into unrecognizable ones. Therefore the
9678 SUBREG case was removed from here.
9679
9680 It is possible that distributing SUBREG over arithmetic operations
9681 leads to an intermediate result that can then be optimized further,
9682 e.g. by moving the outer SUBREG to the other side of a SET as done
9683 in simplify_set. This seems to have been the original intent of
9684 handling SUBREGs here.
9685
9686 However, with current GCC this does not appear to actually happen,
9687 at least on major platforms. If some case is found where removing
9688 the SUBREG case here prevents follow-on optimizations, distributing
9689 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9690
9691 default:
9692 return x;
9693 }
9694
9695 /* Set LHS and RHS to the inner operands (A and B in the example
9696 above) and set OTHER to the common operand (C in the example).
9697 There is only one way to do this unless the inner operation is
9698 commutative. */
9699 if (COMMUTATIVE_ARITH_P (lhs)
9700 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9701 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9702 else if (COMMUTATIVE_ARITH_P (lhs)
9703 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9704 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9705 else if (COMMUTATIVE_ARITH_P (lhs)
9706 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9707 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9708 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9709 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9710 else
9711 return x;
9712
9713 /* Form the new inner operation, seeing if it simplifies first. */
9714 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9715
9716 /* There is one exception to the general way of distributing:
9717 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9718 if (code == XOR && inner_code == IOR)
9719 {
9720 inner_code = AND;
9721 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9722 }
9723
9724 /* We may be able to continue distributing the result, so call
9725 ourselves recursively on the inner operation before forming the
9726 outer operation, which we return. */
9727 return simplify_gen_binary (inner_code, GET_MODE (x),
9728 apply_distributive_law (tem), other);
9729 }
9730
9731 /* See if X is of the form (* (+ A B) C), and if so convert to
9732 (+ (* A C) (* B C)) and try to simplify.
9733
9734 Most of the time, this results in no change. However, if some of
9735 the operands are the same or inverses of each other, simplifications
9736 will result.
9737
9738 For example, (and (ior A B) (not B)) can occur as the result of
9739 expanding a bit field assignment. When we apply the distributive
9740 law to this, we get (ior (and A (not B)) (and B (not B))),
9741 which then simplifies to (and A (not B)).
9742
9743 Note that no checks happen on the validity of applying the inverse
9744 distributive law. Checking here would be pointless since we can do it
9745 in the few places where this routine is called.
9746
9747 N is the index of the term that is decomposed (the arithmetic operation,
9748 i.e. (+ A B) in the first example above). !N is the index of the term that
9749 is distributed, i.e. of C in the first example above. */
9750 static rtx
9751 distribute_and_simplify_rtx (rtx x, int n)
9752 {
9753 machine_mode mode;
9754 enum rtx_code outer_code, inner_code;
9755 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9756
9757 /* Distributivity is not true for floating point as it can change the
9758 value. So we don't do it unless -funsafe-math-optimizations. */
9759 if (FLOAT_MODE_P (GET_MODE (x))
9760 && ! flag_unsafe_math_optimizations)
9761 return NULL_RTX;
9762
9763 decomposed = XEXP (x, n);
9764 if (!ARITHMETIC_P (decomposed))
9765 return NULL_RTX;
9766
9767 mode = GET_MODE (x);
9768 outer_code = GET_CODE (x);
9769 distributed = XEXP (x, !n);
9770
9771 inner_code = GET_CODE (decomposed);
9772 inner_op0 = XEXP (decomposed, 0);
9773 inner_op1 = XEXP (decomposed, 1);
9774
9775 /* Special case (and (xor B C) (not A)), which is equivalent to
9776 (xor (ior A B) (ior A C)) */
9777 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9778 {
9779 distributed = XEXP (distributed, 0);
9780 outer_code = IOR;
9781 }
9782
9783 if (n == 0)
9784 {
9785 /* Distribute the second term. */
9786 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9787 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9788 }
9789 else
9790 {
9791 /* Distribute the first term. */
9792 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9793 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9794 }
9795
9796 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9797 new_op0, new_op1));
9798 if (GET_CODE (tmp) != outer_code
9799 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9800 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9801 return tmp;
9802
9803 return NULL_RTX;
9804 }
9805 \f
9806 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9807 in MODE. Return an equivalent form, if different from (and VAROP
9808 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9809
9810 static rtx
9811 simplify_and_const_int_1 (machine_mode mode, rtx varop,
9812 unsigned HOST_WIDE_INT constop)
9813 {
9814 unsigned HOST_WIDE_INT nonzero;
9815 unsigned HOST_WIDE_INT orig_constop;
9816 rtx orig_varop;
9817 int i;
9818
9819 orig_varop = varop;
9820 orig_constop = constop;
9821 if (GET_CODE (varop) == CLOBBER)
9822 return NULL_RTX;
9823
9824 /* Simplify VAROP knowing that we will only be looking at some of the
9825 bits in it.
9826
9827 Note by passing in CONSTOP, we guarantee that the bits not set in
9828 CONSTOP are not significant and will never be examined. We must
9829 ensure that is the case by explicitly masking out those bits
9830 before returning. */
9831 varop = force_to_mode (varop, mode, constop, 0);
9832
9833 /* If VAROP is a CLOBBER, we will fail so return it. */
9834 if (GET_CODE (varop) == CLOBBER)
9835 return varop;
9836
9837 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9838 to VAROP and return the new constant. */
9839 if (CONST_INT_P (varop))
9840 return gen_int_mode (INTVAL (varop) & constop, mode);
9841
9842 /* See what bits may be nonzero in VAROP. Unlike the general case of
9843 a call to nonzero_bits, here we don't care about bits outside
9844 MODE. */
9845
9846 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9847
9848 /* Turn off all bits in the constant that are known to already be zero.
9849 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9850 which is tested below. */
9851
9852 constop &= nonzero;
9853
9854 /* If we don't have any bits left, return zero. */
9855 if (constop == 0)
9856 return const0_rtx;
9857
9858 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9859 a power of two, we can replace this with an ASHIFT. */
9860 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9861 && (i = exact_log2 (constop)) >= 0)
9862 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
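/* For instance, if X is known to be 0 or 1, (and (neg X) 8) is 8 when X
   is 1 and 0 when X is 0, i.e. exactly (ashift X 3).  */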
9863
9864 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9865 or XOR, then try to apply the distributive law. This may eliminate
9866 operations if either branch can be simplified because of the AND.
9867 It may also make some cases more complex, but those cases probably
9868 won't match a pattern either with or without this. */
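/* For instance, (and (ior X 0x10) 0x0f) distributes to
   (ior (and X 0x0f) (and 0x10 0x0f)) = (ior (and X 0x0f) 0), which
   simplifies to (and X 0x0f).  */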
9869
9870 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9871 return
9872 gen_lowpart
9873 (mode,
9874 apply_distributive_law
9875 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9876 simplify_and_const_int (NULL_RTX,
9877 GET_MODE (varop),
9878 XEXP (varop, 0),
9879 constop),
9880 simplify_and_const_int (NULL_RTX,
9881 GET_MODE (varop),
9882 XEXP (varop, 1),
9883 constop))));
9884
9885 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9886 the AND and see if one of the operands simplifies to zero. If so, we
9887 may eliminate it. */
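/* For instance, with CONSTOP == 15, (and (plus X 16) 15) reduces to
   (and X 15), because (and 16 15) is zero and adding a multiple of 16
   cannot change the low four bits.  */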
9888
9889 if (GET_CODE (varop) == PLUS
9890 && pow2p_hwi (constop + 1))
9891 {
9892 rtx o0, o1;
9893
9894 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9895 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9896 if (o0 == const0_rtx)
9897 return o1;
9898 if (o1 == const0_rtx)
9899 return o0;
9900 }
9901
9902 /* Make a SUBREG if necessary. If we can't make it, fail. */
9903 varop = gen_lowpart (mode, varop);
9904 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9905 return NULL_RTX;
9906
9907 /* If we are only masking insignificant bits, return VAROP. */
9908 if (constop == nonzero)
9909 return varop;
9910
9911 if (varop == orig_varop && constop == orig_constop)
9912 return NULL_RTX;
9913
9914 /* Otherwise, return an AND. */
9915 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9916 }
9917
9918
9919 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9920 in MODE.
9921
9922 Return an equivalent form, if different from X. Otherwise, return X. If
9923 X is zero, we are to always construct the equivalent form. */
9924
9925 static rtx
9926 simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
9927 unsigned HOST_WIDE_INT constop)
9928 {
9929 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9930 if (tem)
9931 return tem;
9932
9933 if (!x)
9934 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9935 gen_int_mode (constop, mode));
9936 if (GET_MODE (x) != mode)
9937 x = gen_lowpart (mode, x);
9938 return x;
9939 }
9940 \f
9941 /* Given a REG, X, compute which bits in X can be nonzero.
9942 We don't care about bits outside of those defined in MODE.
9943
9944 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9945 a shift, AND, or zero_extract, we can do better. */
9946
9947 static rtx
9948 reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
9949 const_rtx known_x ATTRIBUTE_UNUSED,
9950 machine_mode known_mode ATTRIBUTE_UNUSED,
9951 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9952 unsigned HOST_WIDE_INT *nonzero)
9953 {
9954 rtx tem;
9955 reg_stat_type *rsp;
9956
9957 /* If X is a register whose nonzero bits value is current, use it.
9958 Otherwise, if X is a register whose value we can find, use that
9959 value. Otherwise, use the previously-computed global nonzero bits
9960 for this register. */
9961
9962 rsp = &reg_stat[REGNO (x)];
9963 if (rsp->last_set_value != 0
9964 && (rsp->last_set_mode == mode
9965 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9966 && GET_MODE_CLASS (mode) == MODE_INT))
9967 && ((rsp->last_set_label >= label_tick_ebb_start
9968 && rsp->last_set_label < label_tick)
9969 || (rsp->last_set_label == label_tick
9970 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9971 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9972 && REGNO (x) < reg_n_sets_max
9973 && REG_N_SETS (REGNO (x)) == 1
9974 && !REGNO_REG_SET_P
9975 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9976 REGNO (x)))))
9977 {
9978 /* Note that, even if the precision of last_set_mode is lower than that
9979 of mode, record_value_for_reg invoked nonzero_bits on the register
9980 with nonzero_bits_mode (because last_set_mode is necessarily integral
9981 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
9982 are all valid, hence in mode too since nonzero_bits_mode is defined
9983 to the largest HWI_COMPUTABLE_MODE_P mode. */
9984 *nonzero &= rsp->last_set_nonzero_bits;
9985 return NULL;
9986 }
9987
9988 tem = get_last_value (x);
9989 if (tem)
9990 {
9991 if (SHORT_IMMEDIATES_SIGN_EXTEND)
9992 tem = sign_extend_short_imm (tem, GET_MODE (x),
9993 GET_MODE_PRECISION (mode));
9994
9995 return tem;
9996 }
9997
9998 if (nonzero_sign_valid && rsp->nonzero_bits)
9999 {
10000 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10001
10002 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
10003 /* We don't know anything about the upper bits. */
10004 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
10005
10006 *nonzero &= mask;
10007 }
10008
10009 return NULL;
10010 }
10011
10012 /* Return the number of bits at the high-order end of X that are known to
10013 be equal to the sign bit. X will be used in mode MODE; if MODE is
10014 VOIDmode, X will be used in its own mode. The returned value will always
10015 be between 1 and the number of bits in MODE. */
10016
10017 static rtx
10018 reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
10019 const_rtx known_x ATTRIBUTE_UNUSED,
10020 machine_mode known_mode
10021 ATTRIBUTE_UNUSED,
10022 unsigned int known_ret ATTRIBUTE_UNUSED,
10023 unsigned int *result)
10024 {
10025 rtx tem;
10026 reg_stat_type *rsp;
10027
10028 rsp = &reg_stat[REGNO (x)];
10029 if (rsp->last_set_value != 0
10030 && rsp->last_set_mode == mode
10031 && ((rsp->last_set_label >= label_tick_ebb_start
10032 && rsp->last_set_label < label_tick)
10033 || (rsp->last_set_label == label_tick
10034 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10035 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10036 && REGNO (x) < reg_n_sets_max
10037 && REG_N_SETS (REGNO (x)) == 1
10038 && !REGNO_REG_SET_P
10039 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10040 REGNO (x)))))
10041 {
10042 *result = rsp->last_set_sign_bit_copies;
10043 return NULL;
10044 }
10045
10046 tem = get_last_value (x);
10047 if (tem != 0)
10048 return tem;
10049
10050 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10051 && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
10052 *result = rsp->sign_bit_copies;
10053
10054 return NULL;
10055 }
10056 \f
10057 /* Return the number of "extended" bits there are in X, when interpreted
10058 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10059 unsigned quantities, this is the number of high-order zero bits.
10060 For signed quantities, this is the number of copies of the sign bit
10061 minus 1. In both cases, this function returns the number of "spare"
10062 bits. For example, if two quantities for which this function returns
10063 at least 1 are added, the addition is known not to overflow.
10064
10065 This function will always return 0 unless called during combine, which
10066 implies that it must be called from a define_split. */
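/* For example, if X is known to have only its low four bits possibly
   nonzero in SImode, the unsigned count is
   32 - 1 - floor_log2 (0xf) = 28 spare bits; adding two such quantities
   cannot overflow.  */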
10067
10068 unsigned int
10069 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10070 {
10071 if (nonzero_sign_valid == 0)
10072 return 0;
10073
10074 return (unsignedp
10075 ? (HWI_COMPUTABLE_MODE_P (mode)
10076 ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
10077 - floor_log2 (nonzero_bits (x, mode)))
10078 : 0)
10079 : num_sign_bit_copies (x, mode) - 1);
10080 }
10081
10082 /* This function is called from `simplify_shift_const' to merge two
10083 outer operations. Specifically, we have already found that we need
10084 to perform operation *POP0 with constant *PCONST0 at the outermost
10085 position. We would now like to also perform OP1 with constant CONST1
10086 (with *POP0 being done last).
10087
10088 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10089 the resulting operation. *PCOMP_P is set to 1 if we would need to
10090 complement the innermost operand, otherwise it is unchanged.
10091
10092 MODE is the mode in which the operation will be done. No bits outside
10093 the width of this mode matter. It is assumed that the width of this mode
10094 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10095
10096 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10097 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10098 result is simply *PCONST0.
10099
10100 If the resulting operation cannot be expressed as one operation, we
10101 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
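/* For example, if *POP0 is IOR with *PCONST0 == 0x0f and OP1 is IOR with
   CONST1 == 0xf0, the two merge into a single IOR with constant 0xff.
   Likewise, an inner XOR followed by an outer AND with the same constant
   becomes an AND of the complemented operand, and *PCOMP_P is set.  */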
10102
10103 static int
10104 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10105 {
10106 enum rtx_code op0 = *pop0;
10107 HOST_WIDE_INT const0 = *pconst0;
10108
10109 const0 &= GET_MODE_MASK (mode);
10110 const1 &= GET_MODE_MASK (mode);
10111
10112 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10113 if (op0 == AND)
10114 const1 &= const0;
10115
10116 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10117 if OP0 is SET. */
10118
10119 if (op1 == UNKNOWN || op0 == SET)
10120 return 1;
10121
10122 else if (op0 == UNKNOWN)
10123 op0 = op1, const0 = const1;
10124
10125 else if (op0 == op1)
10126 {
10127 switch (op0)
10128 {
10129 case AND:
10130 const0 &= const1;
10131 break;
10132 case IOR:
10133 const0 |= const1;
10134 break;
10135 case XOR:
10136 const0 ^= const1;
10137 break;
10138 case PLUS:
10139 const0 += const1;
10140 break;
10141 case NEG:
10142 op0 = UNKNOWN;
10143 break;
10144 default:
10145 break;
10146 }
10147 }
10148
10149 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10150 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10151 return 0;
10152
10153 /* If the two constants aren't the same, we can't do anything. The
10154 remaining six cases can all be done. */
10155 else if (const0 != const1)
10156 return 0;
10157
10158 else
10159 switch (op0)
10160 {
10161 case IOR:
10162 if (op1 == AND)
10163 /* (a & b) | b == b */
10164 op0 = SET;
10165 else /* op1 == XOR */
10166 /* (a ^ b) | b == a | b */
10167 {;}
10168 break;
10169
10170 case XOR:
10171 if (op1 == AND)
10172 /* (a & b) ^ b == (~a) & b */
10173 op0 = AND, *pcomp_p = 1;
10174 else /* op1 == IOR */
10175 /* (a | b) ^ b == a & ~b */
10176 op0 = AND, const0 = ~const0;
10177 break;
10178
10179 case AND:
10180 if (op1 == IOR)
10181 /* (a | b) & b == b */
10182 op0 = SET;
10183 else /* op1 == XOR */
10184 /* (a ^ b) & b == (~a) & b */
10185 *pcomp_p = 1;
10186 break;
10187 default:
10188 break;
10189 }
10190
10191 /* Check for NO-OP cases. */
10192 const0 &= GET_MODE_MASK (mode);
10193 if (const0 == 0
10194 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10195 op0 = UNKNOWN;
10196 else if (const0 == 0 && op0 == AND)
10197 op0 = SET;
10198 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10199 && op0 == AND)
10200 op0 = UNKNOWN;
10201
10202 *pop0 = op0;
10203
10204 /* ??? Slightly redundant with the above mask, but not entirely.
10205 Moving this above means we'd have to sign-extend the mode mask
10206 for the final test. */
10207 if (op0 != UNKNOWN && op0 != NEG)
10208 *pconst0 = trunc_int_for_mode (const0, mode);
10209
10210 return 1;
10211 }
10212 \f
10213 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10214 the shift in. The original shift operation CODE is performed on OP in
10215 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10216 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10217 result of the shift is subject to operation OUTER_CODE with operand
10218 OUTER_CONST. */
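/* For example, an LSHIFTRT originally done in QImode can be done in SImode
   when OP has no nonzero bits outside the QImode mask, since the bits
   brought in from the left are zero either way.  An ASHIFTRT can only be
   widened when the high-order bits of OP beyond ORIG_MODE are already
   copies of the sign bit.  */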
10219
10220 static machine_mode
10221 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10222 machine_mode orig_mode, machine_mode mode,
10223 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10224 {
10225 if (orig_mode == mode)
10226 return mode;
10227 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10228
10229 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
10230 switch (code)
10231 {
10232 case ASHIFTRT:
10233 /* We can still widen if the bits brought in from the left are identical
10234 to the sign bit of ORIG_MODE. */
10235 if (num_sign_bit_copies (op, mode)
10236 > (unsigned) (GET_MODE_PRECISION (mode)
10237 - GET_MODE_PRECISION (orig_mode)))
10238 return mode;
10239 return orig_mode;
10240
10241 case LSHIFTRT:
10242 /* Similarly here but with zero bits. */
10243 if (HWI_COMPUTABLE_MODE_P (mode)
10244 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10245 return mode;
10246
10247 /* We can also widen if the bits brought in will be masked off. This
10248 operation is performed in ORIG_MODE. */
10249 if (outer_code == AND)
10250 {
10251 int care_bits = low_bitmask_len (orig_mode, outer_const);
10252
10253 if (care_bits >= 0
10254 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10255 return mode;
10256 }
10257 /* fall through */
10258
10259 case ROTATE:
10260 return orig_mode;
10261
10262 case ROTATERT:
10263 gcc_unreachable ();
10264
10265 default:
10266 return mode;
10267 }
10268 }
10269
10270 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10271 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10272 if we cannot simplify it. Otherwise, return a simplified value.
10273
10274 The shift is normally computed in the widest mode we find in VAROP, as
10275 long as it isn't a different number of words than RESULT_MODE. Exceptions
10276 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10277
10278 static rtx
10279 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10280 rtx varop, int orig_count)
10281 {
10282 enum rtx_code orig_code = code;
10283 rtx orig_varop = varop;
10284 int count;
10285 machine_mode mode = result_mode;
10286 machine_mode shift_mode, tmode;
10287 unsigned int mode_words
10288 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10289 /* We form (outer_op (code varop count) (outer_const)). */
10290 enum rtx_code outer_op = UNKNOWN;
10291 HOST_WIDE_INT outer_const = 0;
10292 int complement_p = 0;
10293 rtx new_rtx, x;
10294
10295 /* Make sure to truncate the "natural" shift on the way in. We don't
10296 want to do this inside the loop as it makes it more difficult to
10297 combine shifts. */
10298 if (SHIFT_COUNT_TRUNCATED)
10299 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10300
10301 /* If we were given an invalid count, don't do anything except exactly
10302 what was requested. */
10303
10304 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10305 return NULL_RTX;
10306
10307 count = orig_count;
10308
10309 /* Unless one of the branches of the `if' in this loop does a `continue',
10310 we will `break' the loop after the `if'. */
10311
10312 while (count != 0)
10313 {
10314 /* If we have an operand of (clobber (const_int 0)), fail. */
10315 if (GET_CODE (varop) == CLOBBER)
10316 return NULL_RTX;
10317
10318 /* Convert ROTATERT to ROTATE. */
10319 if (code == ROTATERT)
10320 {
10321 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10322 code = ROTATE;
10323 count = bitsize - count;
10324 }
10325
10326 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
10327 mode, outer_op, outer_const);
10328 machine_mode shift_unit_mode = GET_MODE_INNER (shift_mode);
10329
10330 /* Handle cases where the count is greater than the size of the mode
10331 minus 1. For ASHIFTRT, use the size minus one as the count (this can
10332 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10333 take the count modulo the size. For other shifts, the result is
10334 zero.
10335
10336 Since these shifts are being produced by the compiler by combining
10337 multiple operations, each of which are defined, we know what the
10338 result is supposed to be. */
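/* For instance, in SImode an ASHIFTRT by 37 is equivalent to an ASHIFTRT
   by 31, a ROTATE by 35 is the same as a ROTATE by 3, and an LSHIFTRT by
   35 yields zero.  */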
10339
10340 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10341 {
10342 if (code == ASHIFTRT)
10343 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10344 else if (code == ROTATE || code == ROTATERT)
10345 count %= GET_MODE_PRECISION (shift_unit_mode);
10346 else
10347 {
10348 /* We can't simply return zero because there may be an
10349 outer op. */
10350 varop = const0_rtx;
10351 count = 0;
10352 break;
10353 }
10354 }
10355
10356 /* If we discovered we had to complement VAROP, leave. Making a NOT
10357 here would cause an infinite loop. */
10358 if (complement_p)
10359 break;
10360
10361 if (shift_mode == shift_unit_mode)
10362 {
10363 /* An arithmetic right shift of a quantity known to be -1 or 0
10364 is a no-op. */
10365 if (code == ASHIFTRT
10366 && (num_sign_bit_copies (varop, shift_unit_mode)
10367 == GET_MODE_PRECISION (shift_unit_mode)))
10368 {
10369 count = 0;
10370 break;
10371 }
10372
10373 /* If we are doing an arithmetic right shift and discarding all but
10374 the sign bit copies, this is equivalent to doing a shift by the
10375 bitsize minus one. Convert it into that shift because it will
10376 often allow other simplifications. */
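/* For instance, if VAROP is sign-extended from 16 bits into SImode, it has
   at least 17 sign-bit copies; an ASHIFTRT by 20 then discards everything
   but sign-bit copies (20 + 17 >= 32) and is equivalent to an ASHIFTRT
   by 31.  */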
10377
10378 if (code == ASHIFTRT
10379 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10380 >= GET_MODE_PRECISION (shift_unit_mode)))
10381 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10382
10383 /* We simplify the tests below and elsewhere by converting
10384 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10385 `make_compound_operation' will convert it to an ASHIFTRT for
10386 those machines (such as VAX) that don't have an LSHIFTRT. */
10387 if (code == ASHIFTRT
10388 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10389 && val_signbit_known_clear_p (shift_unit_mode,
10390 nonzero_bits (varop,
10391 shift_unit_mode)))
10392 code = LSHIFTRT;
10393
10394 if (((code == LSHIFTRT
10395 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10396 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10397 || (code == ASHIFT
10398 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10399 && !((nonzero_bits (varop, shift_unit_mode) << count)
10400 & GET_MODE_MASK (shift_unit_mode))))
10401 && !side_effects_p (varop))
10402 varop = const0_rtx;
10403 }
10404
10405 switch (GET_CODE (varop))
10406 {
10407 case SIGN_EXTEND:
10408 case ZERO_EXTEND:
10409 case SIGN_EXTRACT:
10410 case ZERO_EXTRACT:
10411 new_rtx = expand_compound_operation (varop);
10412 if (new_rtx != varop)
10413 {
10414 varop = new_rtx;
10415 continue;
10416 }
10417 break;
10418
10419 case MEM:
10420 /* The following rules apply only to scalars. */
10421 if (shift_mode != shift_unit_mode)
10422 break;
10423
10424 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10425 minus the width of a smaller mode, we can do this with a
10426 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10427 if ((code == ASHIFTRT || code == LSHIFTRT)
10428 && ! mode_dependent_address_p (XEXP (varop, 0),
10429 MEM_ADDR_SPACE (varop))
10430 && ! MEM_VOLATILE_P (varop)
10431 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10432 MODE_INT, 1)) != BLKmode)
10433 {
10434 new_rtx = adjust_address_nv (varop, tmode,
10435 BYTES_BIG_ENDIAN ? 0
10436 : count / BITS_PER_UNIT);
10437
10438 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10439 : ZERO_EXTEND, mode, new_rtx);
10440 count = 0;
10441 continue;
10442 }
10443 break;
10444
10445 case SUBREG:
10446 /* The following rules apply only to scalars. */
10447 if (shift_mode != shift_unit_mode)
10448 break;
10449
10450 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10451 the same number of words as what we've seen so far. Then store
10452 the widest mode in MODE. */
10453 if (subreg_lowpart_p (varop)
10454 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10455 > GET_MODE_SIZE (GET_MODE (varop)))
10456 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10457 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10458 == mode_words
10459 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10460 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10461 {
10462 varop = SUBREG_REG (varop);
10463 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10464 mode = GET_MODE (varop);
10465 continue;
10466 }
10467 break;
10468
10469 case MULT:
10470 /* Some machines use MULT instead of ASHIFT because MULT
10471 is cheaper. But it is still better on those machines to
10472 merge two shifts into one. */
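/* E.g. (mult X 8) is rewritten here as (ashift X 3) so that it can merge
   with the outer shift.  */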
10473 if (CONST_INT_P (XEXP (varop, 1))
10474 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10475 {
10476 varop
10477 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10478 XEXP (varop, 0),
10479 GEN_INT (exact_log2 (
10480 UINTVAL (XEXP (varop, 1)))));
10481 continue;
10482 }
10483 break;
10484
10485 case UDIV:
10486 /* Similar, for when divides are cheaper. */
10487 if (CONST_INT_P (XEXP (varop, 1))
10488 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10489 {
10490 varop
10491 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10492 XEXP (varop, 0),
10493 GEN_INT (exact_log2 (
10494 UINTVAL (XEXP (varop, 1)))));
10495 continue;
10496 }
10497 break;
10498
10499 case ASHIFTRT:
10500 /* If we are extracting just the sign bit of an arithmetic
10501 right shift, that shift is not needed. However, the sign
10502 bit of a wider mode may be different from what would be
10503 interpreted as the sign bit in a narrower mode, so, if
10504 the result is narrower, don't discard the shift. */
10505 if (code == LSHIFTRT
10506 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10507 && (GET_MODE_UNIT_BITSIZE (result_mode)
10508 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10509 {
10510 varop = XEXP (varop, 0);
10511 continue;
10512 }
10513
10514 /* fall through */
10515
10516 case LSHIFTRT:
10517 case ASHIFT:
10518 case ROTATE:
10519 /* The following rules apply only to scalars. */
10520 if (shift_mode != shift_unit_mode)
10521 break;
10522
10523 /* Here we have two nested shifts. The result is usually the
10524 AND of a new shift with a mask. We compute the result below. */
10525 if (CONST_INT_P (XEXP (varop, 1))
10526 && INTVAL (XEXP (varop, 1)) >= 0
10527 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10528 && HWI_COMPUTABLE_MODE_P (result_mode)
10529 && HWI_COMPUTABLE_MODE_P (mode))
10530 {
10531 enum rtx_code first_code = GET_CODE (varop);
10532 unsigned int first_count = INTVAL (XEXP (varop, 1));
10533 unsigned HOST_WIDE_INT mask;
10534 rtx mask_rtx;
10535
10536 /* We have one common special case. We can't do any merging if
10537 the inner code is an ASHIFTRT of a smaller mode. However, if
10538 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10539 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10540 we can convert it to
10541 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10542 This simplifies certain SIGN_EXTEND operations. */
10543 if (code == ASHIFT && first_code == ASHIFTRT
10544 && count == (GET_MODE_PRECISION (result_mode)
10545 - GET_MODE_PRECISION (GET_MODE (varop))))
10546 {
10547 /* C3 has the low-order C1 bits zero. */
10548
10549 mask = GET_MODE_MASK (mode)
10550 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10551
10552 varop = simplify_and_const_int (NULL_RTX, result_mode,
10553 XEXP (varop, 0), mask);
10554 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10555 varop, count);
10556 count = first_count;
10557 code = ASHIFTRT;
10558 continue;
10559 }
10560
10561 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10562 than C1 high-order bits equal to the sign bit, we can convert
10563 this to either an ASHIFT or an ASHIFTRT depending on the
10564 two counts.
10565
10566 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10567
10568 if (code == ASHIFTRT && first_code == ASHIFT
10569 && GET_MODE (varop) == shift_mode
10570 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10571 > first_count))
10572 {
10573 varop = XEXP (varop, 0);
10574 count -= first_count;
10575 if (count < 0)
10576 {
10577 count = -count;
10578 code = ASHIFT;
10579 }
10580
10581 continue;
10582 }
10583
10584 /* There are some cases we can't do. If CODE is ASHIFTRT,
10585 we can only do this if FIRST_CODE is also ASHIFTRT.
10586
10587 We can't do the case when CODE is ROTATE and FIRST_CODE is
10588 ASHIFTRT.
10589
10590 If the mode of this shift is not the mode of the outer shift,
10591 we can't do this if either shift is a right shift or ROTATE.
10592
10593 Finally, we can't do any of these if the mode is too wide
10594 unless the codes are the same.
10595
10596 Handle the case where the shift codes are the same
10597 first. */
10598
10599 if (code == first_code)
10600 {
10601 if (GET_MODE (varop) != result_mode
10602 && (code == ASHIFTRT || code == LSHIFTRT
10603 || code == ROTATE))
10604 break;
10605
10606 count += first_count;
10607 varop = XEXP (varop, 0);
10608 continue;
10609 }
10610
10611 if (code == ASHIFTRT
10612 || (code == ROTATE && first_code == ASHIFTRT)
10613 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10614 || (GET_MODE (varop) != result_mode
10615 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10616 || first_code == ROTATE
10617 || code == ROTATE)))
10618 break;
10619
10620 /* To compute the mask to apply after the shift, shift the
10621 nonzero bits of the inner shift the same way the
10622 outer shift will. */
10623
10624 mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
10625 result_mode);
10626
10627 mask_rtx
10628 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10629 GEN_INT (count));
10630
10631 /* Give up if we can't compute an outer operation to use. */
10632 if (mask_rtx == 0
10633 || !CONST_INT_P (mask_rtx)
10634 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10635 INTVAL (mask_rtx),
10636 result_mode, &complement_p))
10637 break;
10638
10639 /* If the shifts are in the same direction, we add the
10640 counts. Otherwise, we subtract them. */
10641 if ((code == ASHIFTRT || code == LSHIFTRT)
10642 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10643 count += first_count;
10644 else
10645 count -= first_count;
10646
10647 /* If COUNT is positive, the new shift is usually CODE,
10648 except for the two exceptions below, in which case it is
10649 FIRST_CODE. If the count is negative, FIRST_CODE should
10650 always be used. */
10651 if (count > 0
10652 && ((first_code == ROTATE && code == ASHIFT)
10653 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10654 code = first_code;
10655 else if (count < 0)
10656 code = first_code, count = -count;
10657
10658 varop = XEXP (varop, 0);
10659 continue;
10660 }
10661
10662 /* If we have (A << B << C) for any shift, we can convert this to
10663 (A << C << B). This wins if A is a constant. Only try this if
10664 B is not a constant. */
10665
10666 else if (GET_CODE (varop) == code
10667 && CONST_INT_P (XEXP (varop, 0))
10668 && !CONST_INT_P (XEXP (varop, 1)))
10669 {
10670 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10671 sure the result will be masked. See PR70222. */
10672 if (code == LSHIFTRT
10673 && mode != result_mode
10674 && !merge_outer_ops (&outer_op, &outer_const, AND,
10675 GET_MODE_MASK (result_mode)
10676 >> orig_count, result_mode,
10677 &complement_p))
10678 break;
10679 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10680 up outer sign extension (often left and right shift) is
10681 hardly more efficient than the original. See PR70429. */
10682 if (code == ASHIFTRT && mode != result_mode)
10683 break;
10684
10685 rtx new_rtx = simplify_const_binary_operation (code, mode,
10686 XEXP (varop, 0),
10687 GEN_INT (count));
10688 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10689 count = 0;
10690 continue;
10691 }
10692 break;
10693
10694 case NOT:
10695 /* The following rules apply only to scalars. */
10696 if (shift_mode != shift_unit_mode)
10697 break;
10698
10699 /* Make this fit the case below. */
10700 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10701 continue;
10702
10703 case IOR:
10704 case AND:
10705 case XOR:
10706 /* The following rules apply only to scalars. */
10707 if (shift_mode != shift_unit_mode)
10708 break;
10709
10710 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10711 with C the size of VAROP - 1 and the shift is logical if
10712 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10713 we have an (le X 0) operation. If we have an arithmetic shift
10714 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10715 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10716
10717 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10718 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10719 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10720 && (code == LSHIFTRT || code == ASHIFTRT)
10721 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10722 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10723 {
10724 count = 0;
10725 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10726 const0_rtx);
10727
10728 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10729 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10730
10731 continue;
10732 }
10733
10734 /* If we have (shift (logical)), move the logical to the outside
10735 to allow it to possibly combine with another logical and the
10736 shift to combine with another shift. This also canonicalizes to
10737 what a ZERO_EXTRACT looks like. Also, some machines have
10738 (and (shift)) insns. */
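/* For instance, (lshiftrt (and X 0xf0) 4) becomes (and (lshiftrt X 4) 0x0f);
   the AND constant is shifted the same way the shift will shift VAROP.  */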
10739
10740 if (CONST_INT_P (XEXP (varop, 1))
10741 /* We can't do this if we have (ashiftrt (xor)) and the
10742 constant has its sign bit set in shift_mode with shift_mode
10743 wider than result_mode. */
10744 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10745 && result_mode != shift_mode
10746 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10747 shift_mode))
10748 && (new_rtx = simplify_const_binary_operation
10749 (code, result_mode,
10750 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10751 GEN_INT (count))) != 0
10752 && CONST_INT_P (new_rtx)
10753 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10754 INTVAL (new_rtx), result_mode, &complement_p))
10755 {
10756 varop = XEXP (varop, 0);
10757 continue;
10758 }
10759
10760 /* If we can't do that, try to simplify the shift in each arm of the
10761 logical expression, make a new logical expression, and apply
10762 the inverse distributive law. This also can't be done for
10763 (ashiftrt (xor)) where we've widened the shift and the constant
10764 changes the sign bit. */
10765 if (CONST_INT_P (XEXP (varop, 1))
10766 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10767 && result_mode != shift_mode
10768 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10769 shift_mode)))
10770 {
10771 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10772 XEXP (varop, 0), count);
10773 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10774 XEXP (varop, 1), count);
10775
10776 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10777 lhs, rhs);
10778 varop = apply_distributive_law (varop);
10779
10780 count = 0;
10781 continue;
10782 }
10783 break;
10784
10785 case EQ:
10786 /* The following rules apply only to scalars. */
10787 if (shift_mode != shift_unit_mode)
10788 break;
10789
10790 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10791 says that the sign bit can be tested, FOO has mode MODE, C is
10792 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10793 that may be nonzero. */
10794 if (code == LSHIFTRT
10795 && XEXP (varop, 1) == const0_rtx
10796 && GET_MODE (XEXP (varop, 0)) == result_mode
10797 && count == (GET_MODE_PRECISION (result_mode) - 1)
10798 && HWI_COMPUTABLE_MODE_P (result_mode)
10799 && STORE_FLAG_VALUE == -1
10800 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10801 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10802 &complement_p))
10803 {
10804 varop = XEXP (varop, 0);
10805 count = 0;
10806 continue;
10807 }
10808 break;
10809
10810 case NEG:
10811 /* The following rules apply only to scalars. */
10812 if (shift_mode != shift_unit_mode)
10813 break;
10814
10815 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10816 than the number of bits in the mode is equivalent to A. */
10817 if (code == LSHIFTRT
10818 && count == (GET_MODE_PRECISION (result_mode) - 1)
10819 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10820 {
10821 varop = XEXP (varop, 0);
10822 count = 0;
10823 continue;
10824 }
10825
10826 /* NEG commutes with ASHIFT since it is multiplication. Move the
10827 NEG outside to allow shifts to combine. */
10828 if (code == ASHIFT
10829 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10830 &complement_p))
10831 {
10832 varop = XEXP (varop, 0);
10833 continue;
10834 }
10835 break;
10836
10837 case PLUS:
10838 /* The following rules apply only to scalars. */
10839 if (shift_mode != shift_unit_mode)
10840 break;
10841
10842 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10843 is one less than the number of bits in the mode is
10844 equivalent to (xor A 1). */
10845 if (code == LSHIFTRT
10846 && count == (GET_MODE_PRECISION (result_mode) - 1)
10847 && XEXP (varop, 1) == constm1_rtx
10848 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10849 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10850 &complement_p))
10851 {
10852 count = 0;
10853 varop = XEXP (varop, 0);
10854 continue;
10855 }
10856
10857 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10858 that might be nonzero in BAR are those being shifted out and those
10859 bits are known zero in FOO, we can replace the PLUS with FOO.
10860 Similarly in the other operand order. This code occurs when
10861 we are computing the size of a variable-size array. */
10862
10863 if ((code == ASHIFTRT || code == LSHIFTRT)
10864 && count < HOST_BITS_PER_WIDE_INT
10865 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10866 && (nonzero_bits (XEXP (varop, 1), result_mode)
10867 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10868 {
10869 varop = XEXP (varop, 0);
10870 continue;
10871 }
10872 else if ((code == ASHIFTRT || code == LSHIFTRT)
10873 && count < HOST_BITS_PER_WIDE_INT
10874 && HWI_COMPUTABLE_MODE_P (result_mode)
10875 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10876 >> count)
10877 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10878 & nonzero_bits (XEXP (varop, 1),
10879 result_mode)))
10880 {
10881 varop = XEXP (varop, 1);
10882 continue;
10883 }
10884
10885 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
10886 if (code == ASHIFT
10887 && CONST_INT_P (XEXP (varop, 1))
10888 && (new_rtx = simplify_const_binary_operation
10889 (ASHIFT, result_mode,
10890 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10891 GEN_INT (count))) != 0
10892 && CONST_INT_P (new_rtx)
10893 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10894 INTVAL (new_rtx), result_mode, &complement_p))
10895 {
10896 varop = XEXP (varop, 0);
10897 continue;
10898 }
10899
10900 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10901 signbit', and attempt to change the PLUS to an XOR and move it to
10902 the outer operation as is done above in the AND/IOR/XOR case
10903 for (shift (logical)). See the logical handling above for the
10904 reasoning behind doing so. */
10905 if (code == LSHIFTRT
10906 && CONST_INT_P (XEXP (varop, 1))
10907 && mode_signbit_p (result_mode, XEXP (varop, 1))
10908 && (new_rtx = simplify_const_binary_operation
10909 (code, result_mode,
10910 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10911 GEN_INT (count))) != 0
10912 && CONST_INT_P (new_rtx)
10913 && merge_outer_ops (&outer_op, &outer_const, XOR,
10914 INTVAL (new_rtx), result_mode, &complement_p))
10915 {
10916 varop = XEXP (varop, 0);
10917 continue;
10918 }
10919
10920 break;
10921
10922 case MINUS:
10923 /* The following rules apply only to scalars. */
10924 if (shift_mode != shift_unit_mode)
10925 break;
10926
10927 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10928 with C the size of VAROP - 1 and the shift is logical if
10929 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10930 we have a (gt X 0) operation. If the shift is arithmetic with
10931 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10932 we have a (neg (gt X 0)) operation. */
10933
10934 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10935 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10936 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10937 && (code == LSHIFTRT || code == ASHIFTRT)
10938 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10939 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10940 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10941 {
10942 count = 0;
10943 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10944 const0_rtx);
10945
10946 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10947 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10948
10949 continue;
10950 }
10951 break;
10952
10953 case TRUNCATE:
10954 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10955 if the truncate does not affect the value. */
10956 if (code == LSHIFTRT
10957 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10958 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10959 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10960 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
10961 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
10962 {
10963 rtx varop_inner = XEXP (varop, 0);
10964
10965 varop_inner
10966 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10967 XEXP (varop_inner, 0),
10968 GEN_INT
10969 (count + INTVAL (XEXP (varop_inner, 1))));
10970 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10971 count = 0;
10972 continue;
10973 }
10974 break;
10975
10976 default:
10977 break;
10978 }
10979
10980 break;
10981 }
10982
10983 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10984 outer_op, outer_const);
10985
10986 /* We have now finished analyzing the shift. The result should be
10987 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10988 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10989 to the result of the shift. OUTER_CONST is the relevant constant,
10990 but we must turn off all bits turned off in the shift. */
10991
10992 if (outer_op == UNKNOWN
10993 && orig_code == code && orig_count == count
10994 && varop == orig_varop
10995 && shift_mode == GET_MODE (varop))
10996 return NULL_RTX;
10997
10998 /* Make a SUBREG if necessary. If we can't make it, fail. */
10999 varop = gen_lowpart (shift_mode, varop);
11000 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11001 return NULL_RTX;
11002
11003 /* If we have an outer operation and we just made a shift, it is
11004 possible that we could have simplified the shift were it not
11005 for the outer operation. So try to do the simplification
11006 recursively. */
11007
11008 if (outer_op != UNKNOWN)
11009 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11010 else
11011 x = NULL_RTX;
11012
11013 if (x == NULL_RTX)
11014 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11015
11016 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11017 turn off all the bits that the shift would have turned off. */
11018 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11019 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
11020 GET_MODE_MASK (result_mode) >> orig_count);
11021
11022 /* Do the remainder of the processing in RESULT_MODE. */
11023 x = gen_lowpart_or_truncate (result_mode, x);
11024
11025 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11026 operation. */
11027 if (complement_p)
11028 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11029
11030 if (outer_op != UNKNOWN)
11031 {
11032 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11033 && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
11034 outer_const = trunc_int_for_mode (outer_const, result_mode);
11035
11036 if (outer_op == AND)
11037 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
11038 else if (outer_op == SET)
11039 {
11040 /* This means that we have determined that the result is
11041 equivalent to a constant. This should be rare. */
11042 if (!side_effects_p (x))
11043 x = GEN_INT (outer_const);
11044 }
11045 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11046 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
11047 else
11048 x = simplify_gen_binary (outer_op, result_mode, x,
11049 GEN_INT (outer_const));
11050 }
11051
11052 return x;
11053 }
11054
11055 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11056 The result of the shift is RESULT_MODE. If we cannot simplify it,
11057 return X or, if it is NULL, synthesize the expression with
11058 simplify_gen_binary. Otherwise, return a simplified value.
11059
11060 The shift is normally computed in the widest mode we find in VAROP, as
11061 long as it isn't a different number of words than RESULT_MODE. Exceptions
11062 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11063
11064 static rtx
11065 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11066 rtx varop, int count)
11067 {
11068 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11069 if (tem)
11070 return tem;
11071
11072 if (!x)
11073 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11074 if (GET_MODE (x) != result_mode)
11075 x = gen_lowpart (result_mode, x);
11076 return x;
11077 }
11078
11079 \f
11080 /* A subroutine of recog_for_combine. See there for arguments and
11081 return value. */
11082
11083 static int
11084 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11085 {
11086 rtx pat = *pnewpat;
11087 rtx pat_without_clobbers;
11088 int insn_code_number;
11089 int num_clobbers_to_add = 0;
11090 int i;
11091 rtx notes = NULL_RTX;
11092 rtx old_notes, old_pat;
11093 int old_icode;
11094
11095 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11096 we use to indicate that something didn't match. If we find such a
11097 thing, force rejection. */
11098 if (GET_CODE (pat) == PARALLEL)
11099 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11100 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11101 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11102 return -1;
11103
11104 old_pat = PATTERN (insn);
11105 old_notes = REG_NOTES (insn);
11106 PATTERN (insn) = pat;
11107 REG_NOTES (insn) = NULL_RTX;
11108
11109 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11110 if (dump_file && (dump_flags & TDF_DETAILS))
11111 {
11112 if (insn_code_number < 0)
11113 fputs ("Failed to match this instruction:\n", dump_file);
11114 else
11115 fputs ("Successfully matched this instruction:\n", dump_file);
11116 print_rtl_single (dump_file, pat);
11117 }
11118
11119 /* If it didn't match, there is the possibility that we previously had an insn
11120 that clobbered some register as a side effect, but the combined
11121 insn doesn't need to do that. So try once more without the clobbers
11122 unless this represents an ASM insn. */
11123
11124 if (insn_code_number < 0 && ! check_asm_operands (pat)
11125 && GET_CODE (pat) == PARALLEL)
11126 {
11127 int pos;
11128
11129 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11130 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11131 {
11132 if (i != pos)
11133 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11134 pos++;
11135 }
11136
11137 SUBST_INT (XVECLEN (pat, 0), pos);
11138
11139 if (pos == 1)
11140 pat = XVECEXP (pat, 0, 0);
11141
11142 PATTERN (insn) = pat;
11143 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11144 if (dump_file && (dump_flags & TDF_DETAILS))
11145 {
11146 if (insn_code_number < 0)
11147 fputs ("Failed to match this instruction:\n", dump_file);
11148 else
11149 fputs ("Successfully matched this instruction:\n", dump_file);
11150 print_rtl_single (dump_file, pat);
11151 }
11152 }
11153
11154 pat_without_clobbers = pat;
11155
11156 PATTERN (insn) = old_pat;
11157 REG_NOTES (insn) = old_notes;
11158
11159 /* Recognize all noop sets; these will be killed by a followup pass. */
11160 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11161 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11162
11163 /* If we had any clobbers to add, make a new pattern that contains
11164 them. Then check to make sure that all of them are dead. */
11165 if (num_clobbers_to_add)
11166 {
11167 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11168 rtvec_alloc (GET_CODE (pat) == PARALLEL
11169 ? (XVECLEN (pat, 0)
11170 + num_clobbers_to_add)
11171 : num_clobbers_to_add + 1));
11172
11173 if (GET_CODE (pat) == PARALLEL)
11174 for (i = 0; i < XVECLEN (pat, 0); i++)
11175 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11176 else
11177 XVECEXP (newpat, 0, 0) = pat;
11178
11179 add_clobbers (newpat, insn_code_number);
11180
11181 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11182 i < XVECLEN (newpat, 0); i++)
11183 {
11184 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11185 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11186 return -1;
11187 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11188 {
11189 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11190 notes = alloc_reg_note (REG_UNUSED,
11191 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11192 }
11193 }
11194 pat = newpat;
11195 }
11196
11197 if (insn_code_number >= 0
11198 && insn_code_number != NOOP_MOVE_INSN_CODE)
11199 {
11200 old_pat = PATTERN (insn);
11201 old_notes = REG_NOTES (insn);
11202 old_icode = INSN_CODE (insn);
11203 PATTERN (insn) = pat;
11204 REG_NOTES (insn) = notes;
11205
11206 /* Allow targets to reject combined insn. */
11207 if (!targetm.legitimate_combined_insn (insn))
11208 {
11209 if (dump_file && (dump_flags & TDF_DETAILS))
11210 fputs ("Instruction not appropriate for target.",
11211 dump_file);
11212
11213 /* Callers expect recog_for_combine to strip
11214 clobbers from the pattern on failure. */
11215 pat = pat_without_clobbers;
11216 notes = NULL_RTX;
11217
11218 insn_code_number = -1;
11219 }
11220
11221 PATTERN (insn) = old_pat;
11222 REG_NOTES (insn) = old_notes;
11223 INSN_CODE (insn) = old_icode;
11224 }
11225
11226 *pnewpat = pat;
11227 *pnotes = notes;
11228
11229 return insn_code_number;
11230 }
11231
11232 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11233 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11234 Return whether anything was so changed. */
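/* For example, on a !BITS_BIG_ENDIAN target, (zero_extract:SI X (const_int 8)
   (const_int 8)) becomes (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255)),
   and (zero_extend:SI (subreg:QI Y 0)) becomes (and:SI Y' (const_int 255)),
   where Y' is Y itself if Y is SImode, or a lowpart SImode subreg of Y
   otherwise.  */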
11235
11236 static bool
11237 change_zero_ext (rtx pat)
11238 {
11239 bool changed = false;
11240 rtx *src = &SET_SRC (pat);
11241
11242 subrtx_ptr_iterator::array_type array;
11243 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11244 {
11245 rtx x = **iter;
11246 machine_mode mode = GET_MODE (x);
11247 int size;
11248
11249 if (GET_CODE (x) == ZERO_EXTRACT
11250 && CONST_INT_P (XEXP (x, 1))
11251 && CONST_INT_P (XEXP (x, 2))
11252 && GET_MODE (XEXP (x, 0)) == mode)
11253 {
11254 size = INTVAL (XEXP (x, 1));
11255
11256 int start = INTVAL (XEXP (x, 2));
11257 if (BITS_BIG_ENDIAN)
11258 start = GET_MODE_PRECISION (mode) - size - start;
11259
11260 if (start)
11261 x = gen_rtx_LSHIFTRT (mode, XEXP (x, 0), GEN_INT (start));
11262 else
11263 x = XEXP (x, 0);
11264 }
11265 else if (GET_CODE (x) == ZERO_EXTEND
11266 && SCALAR_INT_MODE_P (mode)
11267 && GET_CODE (XEXP (x, 0)) == SUBREG
11268 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11269 && !paradoxical_subreg_p (XEXP (x, 0))
11270 && subreg_lowpart_p (XEXP (x, 0)))
11271 {
11272 size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
11273 x = SUBREG_REG (XEXP (x, 0));
11274 if (GET_MODE (x) != mode)
11275 x = gen_lowpart_SUBREG (mode, x);
11276 }
11277 else if (GET_CODE (x) == ZERO_EXTEND
11278 && SCALAR_INT_MODE_P (mode)
11279 && REG_P (XEXP (x, 0))
11280 && HARD_REGISTER_P (XEXP (x, 0))
11281 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11282 {
11283 size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
11284 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11285 }
11286 else
11287 continue;
11288
11289 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11290 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11291
11292 SUBST (**iter, x);
11293 changed = true;
11294 }
11295
11296 if (changed)
11297 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11298 maybe_swap_commutative_operands (**iter);
11299
11300 rtx *dst = &SET_DEST (pat);
11301 if (GET_CODE (*dst) == ZERO_EXTRACT
11302 && REG_P (XEXP (*dst, 0))
11303 && CONST_INT_P (XEXP (*dst, 1))
11304 && CONST_INT_P (XEXP (*dst, 2)))
11305 {
11306 rtx reg = XEXP (*dst, 0);
11307 int width = INTVAL (XEXP (*dst, 1));
11308 int offset = INTVAL (XEXP (*dst, 2));
11309 machine_mode mode = GET_MODE (reg);
11310 int reg_width = GET_MODE_PRECISION (mode);
11311 if (BITS_BIG_ENDIAN)
11312 offset = reg_width - width - offset;
11313
11314 rtx x, y, z, w;
11315 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11316 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11317 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11318 if (offset)
11319 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11320 else
11321 y = SET_SRC (pat);
11322 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11323 w = gen_rtx_IOR (mode, x, z);
11324 SUBST (SET_DEST (pat), reg);
11325 SUBST (SET_SRC (pat), w);
11326
11327 changed = true;
11328 }
11329
11330 return changed;
11331 }
11332
11333 /* Like recog, but we receive the address of a pointer to a new pattern.
11334 We try to match the rtx that the pointer points to.
11335 If that fails, we may try to modify or replace the pattern,
11336 storing the replacement into the same pointer object.
11337
11338 Modifications include deletion or addition of CLOBBERs. If the
11339 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11340 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11341 (and undo if that fails).
11342
11343 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11344 the CLOBBERs are placed.
11345
11346 The value is the final insn code from the pattern ultimately matched,
11347 or -1. */
11348
11349 static int
11350 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11351 {
11352 rtx pat = *pnewpat;
11353 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11354 if (insn_code_number >= 0 || check_asm_operands (pat))
11355 return insn_code_number;
11356
11357 void *marker = get_undo_marker ();
11358 bool changed = false;
11359
11360 if (GET_CODE (pat) == SET)
11361 changed = change_zero_ext (pat);
11362 else if (GET_CODE (pat) == PARALLEL)
11363 {
11364 int i;
11365 for (i = 0; i < XVECLEN (pat, 0); i++)
11366 {
11367 rtx set = XVECEXP (pat, 0, i);
11368 if (GET_CODE (set) == SET)
11369 changed |= change_zero_ext (set);
11370 }
11371 }
11372
11373 if (changed)
11374 {
11375 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11376
11377 if (insn_code_number < 0)
11378 undo_to_marker (marker);
11379 }
11380
11381 return insn_code_number;
11382 }
11383 \f
11384 /* Like gen_lowpart_general but for use by combine. In combine it
11385 is not possible to create any new pseudoregs. However, it is
11386 safe to create invalid memory addresses, because combine will
11387 try to recognize them and all they will do is make the combine
11388 attempt fail.
11389
11390 If for some reason this cannot do its job, an rtx
11391 (clobber (const_int 0)) is returned.
11392 An insn containing that will not be recognized. */
11393
11394 static rtx
11395 gen_lowpart_for_combine (machine_mode omode, rtx x)
11396 {
11397 machine_mode imode = GET_MODE (x);
11398 unsigned int osize = GET_MODE_SIZE (omode);
11399 unsigned int isize = GET_MODE_SIZE (imode);
11400 rtx result;
11401
11402 if (omode == imode)
11403 return x;
11404
11405 /* We can only support OMODE being wider than a word if X is a
11406 constant integer or has a mode the same size. */
11407 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11408 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11409 goto fail;
11410
11411 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11412 won't know what to do. So we will strip off the SUBREG here and
11413 process normally. */
11414 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11415 {
11416 x = SUBREG_REG (x);
11417
11418 /* In case we fall through to the address adjustments further
11419 below, we need to adjust the known mode and size of x (imode
11420 and isize), since we just changed x. */
11421 imode = GET_MODE (x);
11422
11423 if (imode == omode)
11424 return x;
11425
11426 isize = GET_MODE_SIZE (imode);
11427 }
11428
11429 result = gen_lowpart_common (omode, x);
11430
11431 if (result)
11432 return result;
11433
11434 if (MEM_P (x))
11435 {
11436 int offset = 0;
11437
11438 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11439 address. */
11440 if (MEM_VOLATILE_P (x)
11441 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11442 goto fail;
11443
11444 /* If we want to refer to something bigger than the original memref,
11445 generate a paradoxical subreg instead. That will force a reload
11446 of the original memref X. */
11447 if (isize < osize)
11448 return gen_rtx_SUBREG (omode, x, 0);
11449
11450 if (WORDS_BIG_ENDIAN)
11451 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11452
11453 /* Adjust the address so that the address-after-the-data is
11454 unchanged. */
11455 if (BYTES_BIG_ENDIAN)
11456 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11457
11458 return adjust_address_nv (x, omode, offset);
11459 }
11460
11461 /* If X is a comparison operator, rewrite it in a new mode. This
11462 probably won't match, but may allow further simplifications. */
11463 else if (COMPARISON_P (x))
11464 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11465
11466 /* If we couldn't simplify X any other way, just enclose it in a
11467 SUBREG. Normally, this SUBREG won't match, but some patterns may
11468 include an explicit SUBREG or we may simplify it further in combine. */
11469 else
11470 {
11471 rtx res;
11472
11473 if (imode == VOIDmode)
11474 {
11475 imode = int_mode_for_mode (omode);
11476 x = gen_lowpart_common (imode, x);
11477 if (x == NULL)
11478 goto fail;
11479 }
11480 res = lowpart_subreg (omode, x, imode);
11481 if (res)
11482 return res;
11483 }
11484
11485 fail:
11486 return gen_rtx_CLOBBER (omode, const0_rtx);
11487 }
11488 \f
11489 /* Try to simplify a comparison between OP0 and a constant OP1,
11490 where CODE is the comparison code that will be tested, into a
11491 (CODE OP0 const0_rtx) form.
11492
11493 The result is a possibly different comparison code to use.
11494 *POP1 may be updated. */
11495
11496 static enum rtx_code
11497 simplify_compare_const (enum rtx_code code, machine_mode mode,
11498 rtx op0, rtx *pop1)
11499 {
11500 unsigned int mode_width = GET_MODE_PRECISION (mode);
11501 HOST_WIDE_INT const_op = INTVAL (*pop1);
11502
11503 /* Get the constant we are comparing against and turn off all bits
11504 not on in our mode. */
11505 if (mode != VOIDmode)
11506 const_op = trunc_int_for_mode (const_op, mode);
11507
11508 /* If we are comparing against a constant power of two and the value
11509 being compared can only have that single bit nonzero (e.g., it was
11510 `and'ed with that bit), we can replace this with a comparison
11511 with zero. */
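  /* Worked example, illustrative and not part of the original comments:
     if OP0 is (and X 8), so that nonzero_bits says only bit 3 can be
     set, then (eq OP0 8) becomes (ne OP0 0) here and (ne OP0 8)
     becomes (eq OP0 0).  */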
11512 if (const_op
11513 && (code == EQ || code == NE || code == GE || code == GEU
11514 || code == LT || code == LTU)
11515 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11516 && pow2p_hwi (const_op & GET_MODE_MASK (mode))
11517 && (nonzero_bits (op0, mode)
11518 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
11519 {
11520 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11521 const_op = 0;
11522 }
11523
11524 /* Similarly, if we are comparing a value known to be either -1 or
11525 0 with -1, change it to the opposite comparison against zero. */
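  /* E.g. (illustrative): if OP0 is known to be either 0 or -1, so all
     of its bits are copies of the sign bit, (eq OP0 -1) becomes
     (ne OP0 0) and (ne OP0 -1) becomes (eq OP0 0).  */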
11526 if (const_op == -1
11527 && (code == EQ || code == NE || code == GT || code == LE
11528 || code == GEU || code == LTU)
11529 && num_sign_bit_copies (op0, mode) == mode_width)
11530 {
11531 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11532 const_op = 0;
11533 }
11534
11535 /* Do some canonicalizations based on the comparison code. We prefer
11536 comparisons against zero and then prefer equality comparisons.
11537 If we can reduce the size of a constant, we will do that too. */
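  /* For example (illustrative, not part of the original comments), the
     cases below turn (lt X 1) into (le X 0), (leu X 0) into (eq X 0),
     and (gtu X 0) into (ne X 0).  */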
11538 switch (code)
11539 {
11540 case LT:
11541 /* < C is equivalent to <= (C - 1) */
11542 if (const_op > 0)
11543 {
11544 const_op -= 1;
11545 code = LE;
11546 /* ... fall through to LE case below. */
11547 gcc_fallthrough ();
11548 }
11549 else
11550 break;
11551
11552 case LE:
11553 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11554 if (const_op < 0)
11555 {
11556 const_op += 1;
11557 code = LT;
11558 }
11559
11560 /* If we are doing a <= 0 comparison on a value known to have
11561 a zero sign bit, we can replace this with == 0. */
11562 else if (const_op == 0
11563 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11564 && (nonzero_bits (op0, mode)
11565 & (HOST_WIDE_INT_1U << (mode_width - 1)))
11566 == 0)
11567 code = EQ;
11568 break;
11569
11570 case GE:
11571 /* >= C is equivalent to > (C - 1). */
11572 if (const_op > 0)
11573 {
11574 const_op -= 1;
11575 code = GT;
11576 /* ... fall through to GT below. */
11577 gcc_fallthrough ();
11578 }
11579 else
11580 break;
11581
11582 case GT:
11583 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11584 if (const_op < 0)
11585 {
11586 const_op += 1;
11587 code = GE;
11588 }
11589
11590 /* If we are doing a > 0 comparison on a value known to have
11591 a zero sign bit, we can replace this with != 0. */
11592 else if (const_op == 0
11593 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11594 && (nonzero_bits (op0, mode)
11595 & (HOST_WIDE_INT_1U << (mode_width - 1)))
11596 == 0)
11597 code = NE;
11598 break;
11599
11600 case LTU:
11601 /* < C is equivalent to <= (C - 1). */
11602 if (const_op > 0)
11603 {
11604 const_op -= 1;
11605 code = LEU;
11606 /* ... fall through ... */
11607 }
11608 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11609 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11610 && (unsigned HOST_WIDE_INT) const_op
11611 == HOST_WIDE_INT_1U << (mode_width - 1))
11612 {
11613 const_op = 0;
11614 code = GE;
11615 break;
11616 }
11617 else
11618 break;
11619
11620 case LEU:
11621 /* unsigned <= 0 is equivalent to == 0 */
11622 if (const_op == 0)
11623 code = EQ;
11624 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11625 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11626 && (unsigned HOST_WIDE_INT) const_op
11627 == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
11628 {
11629 const_op = 0;
11630 code = GE;
11631 }
11632 break;
11633
11634 case GEU:
11635 /* >= C is equivalent to > (C - 1). */
11636 if (const_op > 1)
11637 {
11638 const_op -= 1;
11639 code = GTU;
11640 /* ... fall through ... */
11641 }
11642
11643 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11644 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11645 && (unsigned HOST_WIDE_INT) const_op
11646 == HOST_WIDE_INT_1U << (mode_width - 1))
11647 {
11648 const_op = 0;
11649 code = LT;
11650 break;
11651 }
11652 else
11653 break;
11654
11655 case GTU:
11656 /* unsigned > 0 is equivalent to != 0 */
11657 if (const_op == 0)
11658 code = NE;
11659 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11660 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11661 && (unsigned HOST_WIDE_INT) const_op
11662 == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
11663 {
11664 const_op = 0;
11665 code = LT;
11666 }
11667 break;
11668
11669 default:
11670 break;
11671 }
11672
11673 *pop1 = GEN_INT (const_op);
11674 return code;
11675 }
11676 \f
11677 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11678 comparison code that will be tested.
11679
11680 The result is a possibly different comparison code to use. *POP0 and
11681 *POP1 may be updated.
11682
11683 It is possible that we might detect that a comparison is either always
11684 true or always false. However, we do not perform general constant
11685 folding in combine, so this knowledge isn't useful. Such tautologies
11686 should have been detected earlier. Hence we ignore all such cases. */
11687
11688 static enum rtx_code
11689 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11690 {
11691 rtx op0 = *pop0;
11692 rtx op1 = *pop1;
11693 rtx tem, tem1;
11694 int i;
11695 machine_mode mode, tmode;
11696
11697 /* Try a few ways of applying the same transformation to both operands. */
11698 while (1)
11699 {
11700 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11701 so check specially. */
11702 if (!WORD_REGISTER_OPERATIONS
11703 && code != GTU && code != GEU && code != LTU && code != LEU
11704 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11705 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11706 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11707 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11708 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11709 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11710 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11711 && CONST_INT_P (XEXP (op0, 1))
11712 && XEXP (op0, 1) == XEXP (op1, 1)
11713 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11714 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11715 && (INTVAL (XEXP (op0, 1))
11716 == (GET_MODE_PRECISION (GET_MODE (op0))
11717 - (GET_MODE_PRECISION
11718 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11719 {
11720 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11721 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11722 }
11723
11724 /* If both operands are the same constant shift, see if we can ignore the
11725 shift. We can if the shift is a rotate or if the bits shifted out of
11726 this shift are known to be zero for both inputs and if the type of
11727 comparison is compatible with the shift. */
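      /* E.g. (illustrative, not part of the original comments):
	 (eq (lshiftrt X 2) (lshiftrt Y 2)) can be reduced to (eq X Y)
	 when the low two bits of both X and Y are known to be zero.  */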
11728 if (GET_CODE (op0) == GET_CODE (op1)
11729 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11730 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11731 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11732 && (code != GT && code != LT && code != GE && code != LE))
11733 || (GET_CODE (op0) == ASHIFTRT
11734 && (code != GTU && code != LTU
11735 && code != GEU && code != LEU)))
11736 && CONST_INT_P (XEXP (op0, 1))
11737 && INTVAL (XEXP (op0, 1)) >= 0
11738 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11739 && XEXP (op0, 1) == XEXP (op1, 1))
11740 {
11741 machine_mode mode = GET_MODE (op0);
11742 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11743 int shift_count = INTVAL (XEXP (op0, 1));
11744
11745 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11746 mask &= (mask >> shift_count) << shift_count;
11747 else if (GET_CODE (op0) == ASHIFT)
11748 mask = (mask & (mask << shift_count)) >> shift_count;
11749
11750 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11751 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11752 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11753 else
11754 break;
11755 }
11756
11757 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11758 SUBREGs are of the same mode, and, in both cases, the AND would
11759 be redundant if the comparison was done in the narrower mode,
11760 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11761 and the operand's possibly nonzero bits are 0xffffff01; in that case
11762 if we only care about QImode, we don't need the AND). This case
11763 occurs if the output mode of an scc insn is not SImode and
11764 STORE_FLAG_VALUE == 1 (e.g., the 386).
11765
11766 Similarly, check for a case where the AND's are ZERO_EXTEND
11767 operations from some narrower mode even though a SUBREG is not
11768 present. */
11769
11770 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11771 && CONST_INT_P (XEXP (op0, 1))
11772 && CONST_INT_P (XEXP (op1, 1)))
11773 {
11774 rtx inner_op0 = XEXP (op0, 0);
11775 rtx inner_op1 = XEXP (op1, 0);
11776 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11777 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11778 int changed = 0;
11779
11780 if (paradoxical_subreg_p (inner_op0)
11781 && GET_CODE (inner_op1) == SUBREG
11782 && (GET_MODE (SUBREG_REG (inner_op0))
11783 == GET_MODE (SUBREG_REG (inner_op1)))
11784 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11785 <= HOST_BITS_PER_WIDE_INT)
11786 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11787 GET_MODE (SUBREG_REG (inner_op0)))))
11788 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11789 GET_MODE (SUBREG_REG (inner_op1))))))
11790 {
11791 op0 = SUBREG_REG (inner_op0);
11792 op1 = SUBREG_REG (inner_op1);
11793
11794 /* The resulting comparison is always unsigned since we masked
11795 off the original sign bit. */
11796 code = unsigned_condition (code);
11797
11798 changed = 1;
11799 }
11800
11801 else if (c0 == c1)
11802 for (tmode = GET_CLASS_NARROWEST_MODE
11803 (GET_MODE_CLASS (GET_MODE (op0)));
11804 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11805 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11806 {
11807 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11808 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11809 code = unsigned_condition (code);
11810 changed = 1;
11811 break;
11812 }
11813
11814 if (! changed)
11815 break;
11816 }
11817
11818 /* If both operands are NOT, we can strip off the outer operation
11819 and adjust the comparison code for swapped operands; similarly for
11820 NEG, except that this must be an equality comparison. */
11821 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11822 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11823 && (code == EQ || code == NE)))
11824 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11825
11826 else
11827 break;
11828 }
11829
11830 /* If the first operand is a constant, swap the operands and adjust the
11831 comparison code appropriately, but don't do this if the second operand
11832 is already a constant integer. */
11833 if (swap_commutative_operands_p (op0, op1))
11834 {
11835 std::swap (op0, op1);
11836 code = swap_condition (code);
11837 }
11838
11839 /* We now enter a loop during which we will try to simplify the comparison.
11840 For the most part, we only are concerned with comparisons with zero,
11841 but some things may really be comparisons with zero but not start
11842 out looking that way. */
11843
11844 while (CONST_INT_P (op1))
11845 {
11846 machine_mode mode = GET_MODE (op0);
11847 unsigned int mode_width = GET_MODE_PRECISION (mode);
11848 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11849 int equality_comparison_p;
11850 int sign_bit_comparison_p;
11851 int unsigned_comparison_p;
11852 HOST_WIDE_INT const_op;
11853
11854 /* We only want to handle integral modes. This catches VOIDmode,
11855 CCmode, and the floating-point modes. An exception is that we
11856 can handle VOIDmode if OP0 is a COMPARE or a comparison
11857 operation. */
11858
11859 if (GET_MODE_CLASS (mode) != MODE_INT
11860 && ! (mode == VOIDmode
11861 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11862 break;
11863
11864 /* Try to simplify the compare to constant, possibly changing the
11865 comparison op, and/or changing op1 to zero. */
11866 code = simplify_compare_const (code, mode, op0, &op1);
11867 const_op = INTVAL (op1);
11868
11869 /* Compute some predicates to simplify code below. */
11870
11871 equality_comparison_p = (code == EQ || code == NE);
11872 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11873 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11874 || code == GEU);
11875
11876 /* If this is a sign bit comparison and we can do arithmetic in
11877 MODE, say that we will only be needing the sign bit of OP0. */
11878 if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11879 op0 = force_to_mode (op0, mode,
11880 HOST_WIDE_INT_1U
11881 << (GET_MODE_PRECISION (mode) - 1),
11882 0);
11883
11884 /* Now try cases based on the opcode of OP0. If none of the cases
11885 does a "continue", we exit this loop immediately after the
11886 switch. */
11887
11888 switch (GET_CODE (op0))
11889 {
11890 case ZERO_EXTRACT:
11891 /* If we are extracting a single bit from a variable position in
11892 a constant that has only a single bit set and are comparing it
11893 with zero, we can convert this into an equality comparison
11894 between the position and the location of the single bit. */
11895 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11896 have already reduced the shift count modulo the word size. */
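	  /* Worked example (illustrative): with BITS_BIG_ENDIAN == 0,
	     (eq (zero_extract (const_int 4) (const_int 1) POS) 0)
	     becomes (ne POS 2), since bit 2 is the only bit set in 4;
	     POS here is just a placeholder for the position operand.  */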
11897 if (!SHIFT_COUNT_TRUNCATED
11898 && CONST_INT_P (XEXP (op0, 0))
11899 && XEXP (op0, 1) == const1_rtx
11900 && equality_comparison_p && const_op == 0
11901 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11902 {
11903 if (BITS_BIG_ENDIAN)
11904 i = BITS_PER_WORD - 1 - i;
11905
11906 op0 = XEXP (op0, 2);
11907 op1 = GEN_INT (i);
11908 const_op = i;
11909
11910 /* Result is nonzero iff shift count is equal to I. */
11911 code = reverse_condition (code);
11912 continue;
11913 }
11914
11915 /* fall through */
11916
11917 case SIGN_EXTRACT:
11918 tem = expand_compound_operation (op0);
11919 if (tem != op0)
11920 {
11921 op0 = tem;
11922 continue;
11923 }
11924 break;
11925
11926 case NOT:
11927 /* If testing for equality, we can take the NOT of the constant. */
11928 if (equality_comparison_p
11929 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11930 {
11931 op0 = XEXP (op0, 0);
11932 op1 = tem;
11933 continue;
11934 }
11935
11936 /* If just looking at the sign bit, reverse the sense of the
11937 comparison. */
11938 if (sign_bit_comparison_p)
11939 {
11940 op0 = XEXP (op0, 0);
11941 code = (code == GE ? LT : GE);
11942 continue;
11943 }
11944 break;
11945
11946 case NEG:
11947 /* If testing for equality, we can take the NEG of the constant. */
11948 if (equality_comparison_p
11949 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11950 {
11951 op0 = XEXP (op0, 0);
11952 op1 = tem;
11953 continue;
11954 }
11955
11956 /* The remaining cases only apply to comparisons with zero. */
11957 if (const_op != 0)
11958 break;
11959
11960 /* When X is ABS or is known positive,
11961 (neg X) is < 0 if and only if X != 0. */
11962
11963 if (sign_bit_comparison_p
11964 && (GET_CODE (XEXP (op0, 0)) == ABS
11965 || (mode_width <= HOST_BITS_PER_WIDE_INT
11966 && (nonzero_bits (XEXP (op0, 0), mode)
11967 & (HOST_WIDE_INT_1U << (mode_width - 1)))
11968 == 0)))
11969 {
11970 op0 = XEXP (op0, 0);
11971 code = (code == LT ? NE : EQ);
11972 continue;
11973 }
11974
11975 /* If we have NEG of something whose two high-order bits are the
11976 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11977 if (num_sign_bit_copies (op0, mode) >= 2)
11978 {
11979 op0 = XEXP (op0, 0);
11980 code = swap_condition (code);
11981 continue;
11982 }
11983 break;
11984
11985 case ROTATE:
11986 /* If we are testing equality and our count is a constant, we
11987 can perform the inverse operation on our RHS. */
11988 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11989 && (tem = simplify_binary_operation (ROTATERT, mode,
11990 op1, XEXP (op0, 1))) != 0)
11991 {
11992 op0 = XEXP (op0, 0);
11993 op1 = tem;
11994 continue;
11995 }
11996
11997 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11998 a particular bit. Convert it to an AND of a constant of that
11999 bit. This will be converted into a ZERO_EXTRACT. */
12000 if (const_op == 0 && sign_bit_comparison_p
12001 && CONST_INT_P (XEXP (op0, 1))
12002 && mode_width <= HOST_BITS_PER_WIDE_INT)
12003 {
12004 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12005 (HOST_WIDE_INT_1U
12006 << (mode_width - 1
12007 - INTVAL (XEXP (op0, 1)))));
12008 code = (code == LT ? NE : EQ);
12009 continue;
12010 }
12011
12012 /* Fall through. */
12013
12014 case ABS:
12015 /* ABS is ignorable inside an equality comparison with zero. */
12016 if (const_op == 0 && equality_comparison_p)
12017 {
12018 op0 = XEXP (op0, 0);
12019 continue;
12020 }
12021 break;
12022
12023 case SIGN_EXTEND:
12024 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12025 (compare FOO CONST) if CONST fits in FOO's mode and we
12026 are either testing inequality or have an unsigned
12027 comparison with ZERO_EXTEND or a signed comparison with
12028 SIGN_EXTEND. But don't do it if we don't have a compare
12029 insn of the given mode, since we'd have to revert it
12030 later on, and then we wouldn't know whether to sign- or
12031 zero-extend. */
12032 mode = GET_MODE (XEXP (op0, 0));
12033 if (GET_MODE_CLASS (mode) == MODE_INT
12034 && ! unsigned_comparison_p
12035 && HWI_COMPUTABLE_MODE_P (mode)
12036 && trunc_int_for_mode (const_op, mode) == const_op
12037 && have_insn_for (COMPARE, mode))
12038 {
12039 op0 = XEXP (op0, 0);
12040 continue;
12041 }
12042 break;
12043
12044 case SUBREG:
12045 /* Check for the case where we are comparing A - C1 with C2, that is
12046
12047 (subreg:MODE (plus (A) (-C1))) op (C2)
12048
12049 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12050 comparison in the wider mode. One of the following two conditions
12051 must be true in order for this to be valid:
12052
12053 1. The mode extension results in the same bit pattern being added
12054 on both sides and the comparison is equality or unsigned. As
12055 C2 has been truncated to fit in MODE, the pattern can only be
12056 all 0s or all 1s.
12057
12058 2. The mode extension results in the sign bit being copied on
12059 each side.
12060
12061 The difficulty here is that we have predicates for A but not for
12062 (A - C1) so we need to check that C1 is within proper bounds so
12063 	     as to perturb A as little as possible.  */
12064
12065 if (mode_width <= HOST_BITS_PER_WIDE_INT
12066 && subreg_lowpart_p (op0)
12067 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
12068 && GET_CODE (SUBREG_REG (op0)) == PLUS
12069 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12070 {
12071 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
12072 rtx a = XEXP (SUBREG_REG (op0), 0);
12073 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12074
12075 if ((c1 > 0
12076 && (unsigned HOST_WIDE_INT) c1
12077 < HOST_WIDE_INT_1U << (mode_width - 1)
12078 && (equality_comparison_p || unsigned_comparison_p)
12079 /* (A - C1) zero-extends if it is positive and sign-extends
12080 if it is negative, C2 both zero- and sign-extends. */
12081 && ((0 == (nonzero_bits (a, inner_mode)
12082 & ~GET_MODE_MASK (mode))
12083 && const_op >= 0)
12084 /* (A - C1) sign-extends if it is positive and 1-extends
12085 if it is negative, C2 both sign- and 1-extends. */
12086 || (num_sign_bit_copies (a, inner_mode)
12087 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12088 - mode_width)
12089 && const_op < 0)))
12090 || ((unsigned HOST_WIDE_INT) c1
12091 < HOST_WIDE_INT_1U << (mode_width - 2)
12092 /* (A - C1) always sign-extends, like C2. */
12093 && num_sign_bit_copies (a, inner_mode)
12094 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12095 - (mode_width - 1))))
12096 {
12097 op0 = SUBREG_REG (op0);
12098 continue;
12099 }
12100 }
12101
12102 /* If the inner mode is narrower and we are extracting the low part,
12103 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12104 if (subreg_lowpart_p (op0)
12105 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
12106 ;
12107 else if (subreg_lowpart_p (op0)
12108 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12109 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12110 && (code == NE || code == EQ)
12111 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12112 <= HOST_BITS_PER_WIDE_INT)
12113 && !paradoxical_subreg_p (op0)
12114 && (nonzero_bits (SUBREG_REG (op0),
12115 GET_MODE (SUBREG_REG (op0)))
12116 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12117 {
12118 /* Remove outer subregs that don't do anything. */
12119 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12120
12121 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12122 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12123 {
12124 op0 = SUBREG_REG (op0);
12125 op1 = tem;
12126 continue;
12127 }
12128 break;
12129 }
12130 else
12131 break;
12132
12133 /* FALLTHROUGH */
12134
12135 case ZERO_EXTEND:
12136 mode = GET_MODE (XEXP (op0, 0));
12137 if (GET_MODE_CLASS (mode) == MODE_INT
12138 && (unsigned_comparison_p || equality_comparison_p)
12139 && HWI_COMPUTABLE_MODE_P (mode)
12140 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12141 && const_op >= 0
12142 && have_insn_for (COMPARE, mode))
12143 {
12144 op0 = XEXP (op0, 0);
12145 continue;
12146 }
12147 break;
12148
12149 case PLUS:
12150 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12151 this for equality comparisons due to pathological cases involving
12152 overflows. */
12153 if (equality_comparison_p
12154 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12155 op1, XEXP (op0, 1))))
12156 {
12157 op0 = XEXP (op0, 0);
12158 op1 = tem;
12159 continue;
12160 }
12161
12162 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12163 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12164 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12165 {
12166 op0 = XEXP (XEXP (op0, 0), 0);
12167 code = (code == LT ? EQ : NE);
12168 continue;
12169 }
12170 break;
12171
12172 case MINUS:
12173 /* We used to optimize signed comparisons against zero, but that
12174 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12175 	     arrive here as equality comparisons, while (GEU, LTU) are
12176 optimized away. No need to special-case them. */
12177
12178 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12179 (eq B (minus A C)), whichever simplifies. We can only do
12180 this for equality comparisons due to pathological cases involving
12181 overflows. */
12182 if (equality_comparison_p
12183 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12184 XEXP (op0, 1), op1)))
12185 {
12186 op0 = XEXP (op0, 0);
12187 op1 = tem;
12188 continue;
12189 }
12190
12191 if (equality_comparison_p
12192 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12193 XEXP (op0, 0), op1)))
12194 {
12195 op0 = XEXP (op0, 1);
12196 op1 = tem;
12197 continue;
12198 }
12199
12200 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12201 of bits in X minus 1, is one iff X > 0. */
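	  /* E.g. (illustrative, SImode): (lt (minus (ashiftrt X 31) X) 0)
	     becomes (gt X 0), and the GE form becomes (le X 0).  */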
12202 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12203 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12204 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12205 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12206 {
12207 op0 = XEXP (op0, 1);
12208 code = (code == GE ? LE : GT);
12209 continue;
12210 }
12211 break;
12212
12213 case XOR:
12214 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12215 if C is zero or B is a constant. */
12216 if (equality_comparison_p
12217 && 0 != (tem = simplify_binary_operation (XOR, mode,
12218 XEXP (op0, 1), op1)))
12219 {
12220 op0 = XEXP (op0, 0);
12221 op1 = tem;
12222 continue;
12223 }
12224 break;
12225
12226 case EQ: case NE:
12227 case UNEQ: case LTGT:
12228 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
12229 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
12230 case UNORDERED: case ORDERED:
12231 /* We can't do anything if OP0 is a condition code value, rather
12232 than an actual data value. */
12233 if (const_op != 0
12234 || CC0_P (XEXP (op0, 0))
12235 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12236 break;
12237
12238 /* Get the two operands being compared. */
12239 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12240 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12241 else
12242 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12243
12244 /* Check for the cases where we simply want the result of the
12245 earlier test or the opposite of that result. */
12246 if (code == NE || code == EQ
12247 || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
12248 && (code == LT || code == GE)))
12249 {
12250 enum rtx_code new_code;
12251 if (code == LT || code == NE)
12252 new_code = GET_CODE (op0);
12253 else
12254 new_code = reversed_comparison_code (op0, NULL);
12255
12256 if (new_code != UNKNOWN)
12257 {
12258 code = new_code;
12259 op0 = tem;
12260 op1 = tem1;
12261 continue;
12262 }
12263 }
12264 break;
12265
12266 case IOR:
12267 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12268 iff X <= 0. */
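	  /* E.g. (illustrative): (lt (ior (plus X (const_int -1)) X) 0)
	     becomes (le X 0), and the GE form becomes (gt X 0).  */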
12269 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12270 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12271 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12272 {
12273 op0 = XEXP (op0, 1);
12274 code = (code == GE ? GT : LE);
12275 continue;
12276 }
12277 break;
12278
12279 case AND:
12280 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12281 will be converted to a ZERO_EXTRACT later. */
12282 if (const_op == 0 && equality_comparison_p
12283 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12284 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12285 {
12286 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12287 XEXP (XEXP (op0, 0), 1));
12288 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12289 continue;
12290 }
12291
12292 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12293 zero and X is a comparison and C1 and C2 describe only bits set
12294 in STORE_FLAG_VALUE, we can compare with X. */
12295 if (const_op == 0 && equality_comparison_p
12296 && mode_width <= HOST_BITS_PER_WIDE_INT
12297 && CONST_INT_P (XEXP (op0, 1))
12298 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12299 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12300 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12301 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12302 {
12303 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12304 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12305 if ((~STORE_FLAG_VALUE & mask) == 0
12306 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12307 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12308 && COMPARISON_P (tem))))
12309 {
12310 op0 = XEXP (XEXP (op0, 0), 0);
12311 continue;
12312 }
12313 }
12314
12315 /* If we are doing an equality comparison of an AND of a bit equal
12316 to the sign bit, replace this with a LT or GE comparison of
12317 the underlying value. */
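	  /* E.g. (illustrative): for SImode, (eq (and X SIGN_BIT) 0), where
	     SIGN_BIT is the constant with only bit 31 set, becomes
	     (ge X 0), and the NE form becomes (lt X 0).  */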
12318 if (equality_comparison_p
12319 && const_op == 0
12320 && CONST_INT_P (XEXP (op0, 1))
12321 && mode_width <= HOST_BITS_PER_WIDE_INT
12322 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12323 == HOST_WIDE_INT_1U << (mode_width - 1)))
12324 {
12325 op0 = XEXP (op0, 0);
12326 code = (code == EQ ? GE : LT);
12327 continue;
12328 }
12329
12330 /* If this AND operation is really a ZERO_EXTEND from a narrower
12331 mode, the constant fits within that mode, and this is either an
12332 equality or unsigned comparison, try to do this comparison in
12333 the narrower mode.
12334
12335 Note that in:
12336
12337 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12338 -> (ne:DI (reg:SI 4) (const_int 0))
12339
12340 unless TRULY_NOOP_TRUNCATION allows it or the register is
12341 	     known to hold a value of the required mode, the
12342 transformation is invalid. */
12343 if ((equality_comparison_p || unsigned_comparison_p)
12344 && CONST_INT_P (XEXP (op0, 1))
12345 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12346 & GET_MODE_MASK (mode))
12347 + 1)) >= 0
12348 && const_op >> i == 0
12349 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
12350 {
12351 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12352 continue;
12353 }
12354
12355 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12356 fits in both M1 and M2 and the SUBREG is either paradoxical
12357 or represents the low part, permute the SUBREG and the AND
12358 and try again. */
12359 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12360 && CONST_INT_P (XEXP (op0, 1)))
12361 {
12362 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
12363 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12364 /* Require an integral mode, to avoid creating something like
12365 (AND:SF ...). */
12366 if (SCALAR_INT_MODE_P (tmode)
12367 /* It is unsafe to commute the AND into the SUBREG if the
12368 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12369 not defined. As originally written the upper bits
12370 have a defined value due to the AND operation.
12371 However, if we commute the AND inside the SUBREG then
12372 they no longer have defined values and the meaning of
12373 the code has been changed.
12374 Also C1 should not change value in the smaller mode,
12375 see PR67028 (a positive C1 can become negative in the
12376 smaller mode, so that the AND does no longer mask the
12377 upper bits). */
12378 && ((WORD_REGISTER_OPERATIONS
12379 && mode_width > GET_MODE_PRECISION (tmode)
12380 && mode_width <= BITS_PER_WORD
12381 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12382 || (mode_width <= GET_MODE_PRECISION (tmode)
12383 && subreg_lowpart_p (XEXP (op0, 0))))
12384 && mode_width <= HOST_BITS_PER_WIDE_INT
12385 && HWI_COMPUTABLE_MODE_P (tmode)
12386 && (c1 & ~mask) == 0
12387 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12388 && c1 != mask
12389 && c1 != GET_MODE_MASK (tmode))
12390 {
12391 op0 = simplify_gen_binary (AND, tmode,
12392 SUBREG_REG (XEXP (op0, 0)),
12393 gen_int_mode (c1, tmode));
12394 op0 = gen_lowpart (mode, op0);
12395 continue;
12396 }
12397 }
12398
12399 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12400 if (const_op == 0 && equality_comparison_p
12401 && XEXP (op0, 1) == const1_rtx
12402 && GET_CODE (XEXP (op0, 0)) == NOT)
12403 {
12404 op0 = simplify_and_const_int (NULL_RTX, mode,
12405 XEXP (XEXP (op0, 0), 0), 1);
12406 code = (code == NE ? EQ : NE);
12407 continue;
12408 }
12409
12410 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12411 (eq (and (lshiftrt X) 1) 0).
12412 Also handle the case where (not X) is expressed using xor. */
12413 if (const_op == 0 && equality_comparison_p
12414 && XEXP (op0, 1) == const1_rtx
12415 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12416 {
12417 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12418 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12419
12420 if (GET_CODE (shift_op) == NOT
12421 || (GET_CODE (shift_op) == XOR
12422 && CONST_INT_P (XEXP (shift_op, 1))
12423 && CONST_INT_P (shift_count)
12424 && HWI_COMPUTABLE_MODE_P (mode)
12425 && (UINTVAL (XEXP (shift_op, 1))
12426 == HOST_WIDE_INT_1U
12427 << INTVAL (shift_count))))
12428 {
12429 op0
12430 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12431 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12432 code = (code == NE ? EQ : NE);
12433 continue;
12434 }
12435 }
12436 break;
12437
12438 case ASHIFT:
12439 /* If we have (compare (ashift FOO N) (const_int C)) and
12440 the high order N bits of FOO (N+1 if an inequality comparison)
12441 are known to be zero, we can do this by comparing FOO with C
12442 shifted right N bits so long as the low-order N bits of C are
12443 zero. */
12444 if (CONST_INT_P (XEXP (op0, 1))
12445 && INTVAL (XEXP (op0, 1)) >= 0
12446 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12447 < HOST_BITS_PER_WIDE_INT)
12448 && (((unsigned HOST_WIDE_INT) const_op
12449 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12450 - 1)) == 0)
12451 && mode_width <= HOST_BITS_PER_WIDE_INT
12452 && (nonzero_bits (XEXP (op0, 0), mode)
12453 & ~(mask >> (INTVAL (XEXP (op0, 1))
12454 + ! equality_comparison_p))) == 0)
12455 {
12456 /* We must perform a logical shift, not an arithmetic one,
12457 as we want the top N bits of C to be zero. */
12458 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12459
12460 temp >>= INTVAL (XEXP (op0, 1));
12461 op1 = gen_int_mode (temp, mode);
12462 op0 = XEXP (op0, 0);
12463 continue;
12464 }
12465
12466 /* If we are doing a sign bit comparison, it means we are testing
12467 a particular bit. Convert it to the appropriate AND. */
12468 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12469 && mode_width <= HOST_BITS_PER_WIDE_INT)
12470 {
12471 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12472 (HOST_WIDE_INT_1U
12473 << (mode_width - 1
12474 - INTVAL (XEXP (op0, 1)))));
12475 code = (code == LT ? NE : EQ);
12476 continue;
12477 }
12478
12479 	  /* If this is an equality comparison with zero and we are shifting
12480 the low bit to the sign bit, we can convert this to an AND of the
12481 low-order bit. */
12482 if (const_op == 0 && equality_comparison_p
12483 && CONST_INT_P (XEXP (op0, 1))
12484 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12485 {
12486 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12487 continue;
12488 }
12489 break;
12490
12491 case ASHIFTRT:
12492 /* If this is an equality comparison with zero, we can do this
12493 as a logical shift, which might be much simpler. */
12494 if (equality_comparison_p && const_op == 0
12495 && CONST_INT_P (XEXP (op0, 1)))
12496 {
12497 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12498 XEXP (op0, 0),
12499 INTVAL (XEXP (op0, 1)));
12500 continue;
12501 }
12502
12503 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12504 do the comparison in a narrower mode. */
12505 if (! unsigned_comparison_p
12506 && CONST_INT_P (XEXP (op0, 1))
12507 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12508 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12509 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12510 MODE_INT, 1)) != BLKmode
12511 && (((unsigned HOST_WIDE_INT) const_op
12512 + (GET_MODE_MASK (tmode) >> 1) + 1)
12513 <= GET_MODE_MASK (tmode)))
12514 {
12515 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12516 continue;
12517 }
12518
12519 /* Likewise if OP0 is a PLUS of a sign extension with a
12520 constant, which is usually represented with the PLUS
12521 between the shifts. */
12522 if (! unsigned_comparison_p
12523 && CONST_INT_P (XEXP (op0, 1))
12524 && GET_CODE (XEXP (op0, 0)) == PLUS
12525 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12526 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12527 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12528 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12529 MODE_INT, 1)) != BLKmode
12530 && (((unsigned HOST_WIDE_INT) const_op
12531 + (GET_MODE_MASK (tmode) >> 1) + 1)
12532 <= GET_MODE_MASK (tmode)))
12533 {
12534 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12535 rtx add_const = XEXP (XEXP (op0, 0), 1);
12536 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
12537 add_const, XEXP (op0, 1));
12538
12539 op0 = simplify_gen_binary (PLUS, tmode,
12540 gen_lowpart (tmode, inner),
12541 new_const);
12542 continue;
12543 }
12544
12545 /* FALLTHROUGH */
12546 case LSHIFTRT:
12547 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12548 the low order N bits of FOO are known to be zero, we can do this
12549 by comparing FOO with C shifted left N bits so long as no
12550 overflow occurs. Even if the low order N bits of FOO aren't known
12551 to be zero, if the comparison is >= or < we can use the same
12552 optimization and for > or <= by setting all the low
12553 order N bits in the comparison constant. */
12554 if (CONST_INT_P (XEXP (op0, 1))
12555 && INTVAL (XEXP (op0, 1)) > 0
12556 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12557 && mode_width <= HOST_BITS_PER_WIDE_INT
12558 && (((unsigned HOST_WIDE_INT) const_op
12559 + (GET_CODE (op0) != LSHIFTRT
12560 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12561 + 1)
12562 : 0))
12563 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12564 {
12565 unsigned HOST_WIDE_INT low_bits
12566 = (nonzero_bits (XEXP (op0, 0), mode)
12567 & ((HOST_WIDE_INT_1U
12568 << INTVAL (XEXP (op0, 1))) - 1));
12569 if (low_bits == 0 || !equality_comparison_p)
12570 {
12571 /* If the shift was logical, then we must make the condition
12572 unsigned. */
12573 if (GET_CODE (op0) == LSHIFTRT)
12574 code = unsigned_condition (code);
12575
12576 const_op = (unsigned HOST_WIDE_INT) const_op
12577 << INTVAL (XEXP (op0, 1));
12578 if (low_bits != 0
12579 && (code == GT || code == GTU
12580 || code == LE || code == LEU))
12581 const_op
12582 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12583 op1 = GEN_INT (const_op);
12584 op0 = XEXP (op0, 0);
12585 continue;
12586 }
12587 }
12588
12589 /* If we are using this shift to extract just the sign bit, we
12590 can replace this with an LT or GE comparison. */
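	  /* E.g. (illustrative, SImode): (ne (lshiftrt X 31) 0) becomes
	     (lt X 0) and (eq (lshiftrt X 31) 0) becomes (ge X 0).  */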
12591 if (const_op == 0
12592 && (equality_comparison_p || sign_bit_comparison_p)
12593 && CONST_INT_P (XEXP (op0, 1))
12594 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12595 {
12596 op0 = XEXP (op0, 0);
12597 code = (code == NE || code == GT ? LT : GE);
12598 continue;
12599 }
12600 break;
12601
12602 default:
12603 break;
12604 }
12605
12606 break;
12607 }
12608
12609 /* Now make any compound operations involved in this comparison. Then,
12610    check for an outermost SUBREG on OP0 that is not doing anything or is
12611 paradoxical. The latter transformation must only be performed when
12612 it is known that the "extra" bits will be the same in op0 and op1 or
12613 that they don't matter. There are three cases to consider:
12614
12615 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12616 care bits and we can assume they have any convenient value. So
12617 making the transformation is safe.
12618
12619 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12620 In this case the upper bits of op0 are undefined. We should not make
12621 the simplification in that case as we do not know the contents of
12622 those bits.
12623
12624 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12625 In that case we know those bits are zeros or ones. We must also be
12626 sure that they are the same as the upper bits of op1.
12627
12628 We can never remove a SUBREG for a non-equality comparison because
12629 the sign bit is in a different place in the underlying object. */
12630
12631 rtx_code op0_mco_code = SET;
12632 if (op1 == const0_rtx)
12633 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12634
12635 op0 = make_compound_operation (op0, op0_mco_code);
12636 op1 = make_compound_operation (op1, SET);
12637
12638 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12639 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12640 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12641 && (code == NE || code == EQ))
12642 {
12643 if (paradoxical_subreg_p (op0))
12644 {
12645 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12646 implemented. */
12647 if (REG_P (SUBREG_REG (op0)))
12648 {
12649 op0 = SUBREG_REG (op0);
12650 op1 = gen_lowpart (GET_MODE (op0), op1);
12651 }
12652 }
12653 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12654 <= HOST_BITS_PER_WIDE_INT)
12655 && (nonzero_bits (SUBREG_REG (op0),
12656 GET_MODE (SUBREG_REG (op0)))
12657 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12658 {
12659 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12660
12661 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12662 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12663 op0 = SUBREG_REG (op0), op1 = tem;
12664 }
12665 }
12666
12667 /* We now do the opposite procedure: Some machines don't have compare
12668 insns in all modes. If OP0's mode is an integer mode smaller than a
12669 word and we can't do a compare in that mode, see if there is a larger
12670 mode for which we can do the compare. There are a number of cases in
12671 which we can use the wider mode. */
12672
12673 mode = GET_MODE (op0);
12674 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12675 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12676 && ! have_insn_for (COMPARE, mode))
12677 for (tmode = GET_MODE_WIDER_MODE (mode);
12678 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12679 tmode = GET_MODE_WIDER_MODE (tmode))
12680 if (have_insn_for (COMPARE, tmode))
12681 {
12682 int zero_extended;
12683
12684 /* If this is a test for negative, we can make an explicit
12685 test of the sign bit. Test this first so we can use
12686 a paradoxical subreg to extend OP0. */
12687
12688 if (op1 == const0_rtx && (code == LT || code == GE)
12689 && HWI_COMPUTABLE_MODE_P (mode))
12690 {
12691 unsigned HOST_WIDE_INT sign
12692 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12693 op0 = simplify_gen_binary (AND, tmode,
12694 gen_lowpart (tmode, op0),
12695 gen_int_mode (sign, tmode));
12696 code = (code == LT) ? NE : EQ;
12697 break;
12698 }
12699
12700 /* If the only nonzero bits in OP0 and OP1 are those in the
12701 narrower mode and this is an equality or unsigned comparison,
12702 we can use the wider mode. Similarly for sign-extended
12703 values, in which case it is true for all comparisons. */
12704 zero_extended = ((code == EQ || code == NE
12705 || code == GEU || code == GTU
12706 || code == LEU || code == LTU)
12707 && (nonzero_bits (op0, tmode)
12708 & ~GET_MODE_MASK (mode)) == 0
12709 && ((CONST_INT_P (op1)
12710 || (nonzero_bits (op1, tmode)
12711 & ~GET_MODE_MASK (mode)) == 0)));
12712
12713 if (zero_extended
12714 || ((num_sign_bit_copies (op0, tmode)
12715 > (unsigned int) (GET_MODE_PRECISION (tmode)
12716 - GET_MODE_PRECISION (mode)))
12717 && (num_sign_bit_copies (op1, tmode)
12718 > (unsigned int) (GET_MODE_PRECISION (tmode)
12719 - GET_MODE_PRECISION (mode)))))
12720 {
12721 /* If OP0 is an AND and we don't have an AND in MODE either,
12722 make a new AND in the proper mode. */
12723 if (GET_CODE (op0) == AND
12724 && !have_insn_for (AND, mode))
12725 op0 = simplify_gen_binary (AND, tmode,
12726 gen_lowpart (tmode,
12727 XEXP (op0, 0)),
12728 gen_lowpart (tmode,
12729 XEXP (op0, 1)));
12730 else
12731 {
12732 if (zero_extended)
12733 {
12734 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12735 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12736 }
12737 else
12738 {
12739 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12740 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12741 }
12742 break;
12743 }
12744 }
12745 }
12746
12747 /* We may have changed the comparison operands. Re-canonicalize. */
12748 if (swap_commutative_operands_p (op0, op1))
12749 {
12750 std::swap (op0, op1);
12751 code = swap_condition (code);
12752 }
12753
12754 /* If this machine only supports a subset of valid comparisons, see if we
12755 can convert an unsupported one into a supported one. */
12756 target_canonicalize_comparison (&code, &op0, &op1, 0);
12757
12758 *pop0 = op0;
12759 *pop1 = op1;
12760
12761 return code;
12762 }
12763 \f
12764 /* Utility function for record_value_for_reg.  Count the number of
12765 rtxs in X. */
12766 static int
12767 count_rtxs (rtx x)
12768 {
12769 enum rtx_code code = GET_CODE (x);
12770 const char *fmt;
12771 int i, j, ret = 1;
12772
12773 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12774 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12775 {
12776 rtx x0 = XEXP (x, 0);
12777 rtx x1 = XEXP (x, 1);
12778
12779 if (x0 == x1)
12780 return 1 + 2 * count_rtxs (x0);
12781
12782 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12783 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12784 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12785 return 2 + 2 * count_rtxs (x0)
12786 + count_rtxs (x == XEXP (x1, 0)
12787 ? XEXP (x1, 1) : XEXP (x1, 0));
12788
12789 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12790 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12791 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12792 return 2 + 2 * count_rtxs (x1)
12793 + count_rtxs (x == XEXP (x0, 0)
12794 ? XEXP (x0, 1) : XEXP (x0, 0));
12795 }
12796
12797 fmt = GET_RTX_FORMAT (code);
12798 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12799 if (fmt[i] == 'e')
12800 ret += count_rtxs (XEXP (x, i));
12801 else if (fmt[i] == 'E')
12802 for (j = 0; j < XVECLEN (x, i); j++)
12803 ret += count_rtxs (XVECEXP (x, i, j));
12804
12805 return ret;
12806 }
12807 \f
12808 /* Utility function for the following routine.  Called when X is part of a value
12809 being stored into last_set_value. Sets last_set_table_tick
12810 for each register mentioned. Similar to mention_regs in cse.c */
12811
12812 static void
12813 update_table_tick (rtx x)
12814 {
12815 enum rtx_code code = GET_CODE (x);
12816 const char *fmt = GET_RTX_FORMAT (code);
12817 int i, j;
12818
12819 if (code == REG)
12820 {
12821 unsigned int regno = REGNO (x);
12822 unsigned int endregno = END_REGNO (x);
12823 unsigned int r;
12824
12825 for (r = regno; r < endregno; r++)
12826 {
12827 reg_stat_type *rsp = &reg_stat[r];
12828 rsp->last_set_table_tick = label_tick;
12829 }
12830
12831 return;
12832 }
12833
12834 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12835 if (fmt[i] == 'e')
12836 {
12837 /* Check for identical subexpressions. If x contains
12838 identical subexpression we only have to traverse one of
12839 them. */
12840 if (i == 0 && ARITHMETIC_P (x))
12841 {
12842 /* Note that at this point x1 has already been
12843 processed. */
12844 rtx x0 = XEXP (x, 0);
12845 rtx x1 = XEXP (x, 1);
12846
12847 /* If x0 and x1 are identical then there is no need to
12848 process x0. */
12849 if (x0 == x1)
12850 break;
12851
12852 /* If x0 is identical to a subexpression of x1 then while
12853 processing x1, x0 has already been processed. Thus we
12854 are done with x. */
12855 if (ARITHMETIC_P (x1)
12856 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12857 break;
12858
12859 /* If x1 is identical to a subexpression of x0 then we
12860 still have to process the rest of x0. */
12861 if (ARITHMETIC_P (x0)
12862 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12863 {
12864 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12865 break;
12866 }
12867 }
12868
12869 update_table_tick (XEXP (x, i));
12870 }
12871 else if (fmt[i] == 'E')
12872 for (j = 0; j < XVECLEN (x, i); j++)
12873 update_table_tick (XVECEXP (x, i, j));
12874 }
12875
12876 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12877 are saying that the register is clobbered and we no longer know its
12878 value. If INSN is zero, don't update reg_stat[].last_set; this is
12879 only permitted with VALUE also zero and is used to invalidate the
12880 register. */
12881
12882 static void
12883 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
12884 {
12885 unsigned int regno = REGNO (reg);
12886 unsigned int endregno = END_REGNO (reg);
12887 unsigned int i;
12888 reg_stat_type *rsp;
12889
12890 /* If VALUE contains REG and we have a previous value for REG, substitute
12891 the previous value. */
12892 if (value && insn && reg_overlap_mentioned_p (reg, value))
12893 {
12894 rtx tem;
12895
12896 /* Set things up so get_last_value is allowed to see anything set up to
12897 our insn. */
12898 subst_low_luid = DF_INSN_LUID (insn);
12899 tem = get_last_value (reg);
12900
12901 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12902 it isn't going to be useful and will take a lot of time to process,
12903 so just use the CLOBBER. */
12904
12905 if (tem)
12906 {
12907 if (ARITHMETIC_P (tem)
12908 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12909 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12910 tem = XEXP (tem, 0);
12911 else if (count_occurrences (value, reg, 1) >= 2)
12912 {
12913 /* If there are two or more occurrences of REG in VALUE,
12914 prevent the value from growing too much. */
12915 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12916 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12917 }
12918
12919 value = replace_rtx (copy_rtx (value), reg, tem);
12920 }
12921 }
12922
12923 /* For each register modified, show we don't know its value, that
12924 we don't know about its bitwise content, that its value has been
12925 updated, and that we don't know the location of the death of the
12926 register. */
12927 for (i = regno; i < endregno; i++)
12928 {
12929 rsp = &reg_stat[i];
12930
12931 if (insn)
12932 rsp->last_set = insn;
12933
12934 rsp->last_set_value = 0;
12935 rsp->last_set_mode = VOIDmode;
12936 rsp->last_set_nonzero_bits = 0;
12937 rsp->last_set_sign_bit_copies = 0;
12938 rsp->last_death = 0;
12939 rsp->truncated_to_mode = VOIDmode;
12940 }
12941
12942 /* Mark registers that are being referenced in this value. */
12943 if (value)
12944 update_table_tick (value);
12945
12946 /* Now update the status of each register being set.
12947 If someone is using this register in this block, set this register
12948 to invalid since we will get confused between the two lives in this
12949 basic block. This makes using this register always invalid. In cse, we
12950 scan the table to invalidate all entries using this register, but this
12951 is too much work for us. */
12952
12953 for (i = regno; i < endregno; i++)
12954 {
12955 rsp = &reg_stat[i];
12956 rsp->last_set_label = label_tick;
12957 if (!insn
12958 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12959 rsp->last_set_invalid = 1;
12960 else
12961 rsp->last_set_invalid = 0;
12962 }
12963
12964 /* The value being assigned might refer to X (like in "x++;"). In that
12965 case, we must replace it with (clobber (const_int 0)) to prevent
12966 infinite loops. */
12967 rsp = &reg_stat[regno];
12968 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12969 {
12970 value = copy_rtx (value);
12971 if (!get_last_value_validate (&value, insn, label_tick, 1))
12972 value = 0;
12973 }
12974
12975 /* For the main register being modified, update the value, the mode, the
12976 nonzero bits, and the number of sign bit copies. */
12977
12978 rsp->last_set_value = value;
12979
12980 if (value)
12981 {
12982 machine_mode mode = GET_MODE (reg);
12983 subst_low_luid = DF_INSN_LUID (insn);
12984 rsp->last_set_mode = mode;
12985 if (GET_MODE_CLASS (mode) == MODE_INT
12986 && HWI_COMPUTABLE_MODE_P (mode))
12987 mode = nonzero_bits_mode;
12988 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12989 rsp->last_set_sign_bit_copies
12990 = num_sign_bit_copies (value, GET_MODE (reg));
12991 }
12992 }
12993
12994 /* Called via note_stores from record_dead_and_set_regs to handle one
12995 SET or CLOBBER in an insn. DATA is the instruction in which the
12996 set is occurring. */
12997
12998 static void
12999 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13000 {
13001 rtx_insn *record_dead_insn = (rtx_insn *) data;
13002
13003 if (GET_CODE (dest) == SUBREG)
13004 dest = SUBREG_REG (dest);
13005
13006 if (!record_dead_insn)
13007 {
13008 if (REG_P (dest))
13009 record_value_for_reg (dest, NULL, NULL_RTX);
13010 return;
13011 }
13012
13013 if (REG_P (dest))
13014 {
13015 /* If we are setting the whole register, we know its value. Otherwise
13016 show that we don't know the value. We can handle SUBREG in
13017 some cases. */
13018 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13019 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13020 else if (GET_CODE (setter) == SET
13021 && GET_CODE (SET_DEST (setter)) == SUBREG
13022 && SUBREG_REG (SET_DEST (setter)) == dest
13023 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13024 && subreg_lowpart_p (SET_DEST (setter)))
13025 record_value_for_reg (dest, record_dead_insn,
13026 gen_lowpart (GET_MODE (dest),
13027 SET_SRC (setter)));
13028 else
13029 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13030 }
13031 else if (MEM_P (dest)
13032 /* Ignore pushes, they clobber nothing. */
13033 && ! push_operand (dest, GET_MODE (dest)))
13034 mem_last_set = DF_INSN_LUID (record_dead_insn);
13035 }
13036
13037 /* Update the records of when each REG was most recently set or killed
13038 for the things done by INSN. This is the last thing done in processing
13039 INSN in the combiner loop.
13040
13041 We update reg_stat[], in particular fields last_set, last_set_value,
13042 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13043 last_death, and also the similar information mem_last_set (which insn
13044 most recently modified memory) and last_call_luid (which insn was the
13045 most recent subroutine call). */
13046
13047 static void
13048 record_dead_and_set_regs (rtx_insn *insn)
13049 {
13050 rtx link;
13051 unsigned int i;
13052
13053 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13054 {
13055 if (REG_NOTE_KIND (link) == REG_DEAD
13056 && REG_P (XEXP (link, 0)))
13057 {
13058 unsigned int regno = REGNO (XEXP (link, 0));
13059 unsigned int endregno = END_REGNO (XEXP (link, 0));
13060
13061 for (i = regno; i < endregno; i++)
13062 {
13063 reg_stat_type *rsp;
13064
13065 rsp = &reg_stat[i];
13066 rsp->last_death = insn;
13067 }
13068 }
13069 else if (REG_NOTE_KIND (link) == REG_INC)
13070 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13071 }
13072
13073 if (CALL_P (insn))
13074 {
13075 hard_reg_set_iterator hrsi;
13076 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13077 {
13078 reg_stat_type *rsp;
13079
13080 rsp = &reg_stat[i];
13081 rsp->last_set_invalid = 1;
13082 rsp->last_set = insn;
13083 rsp->last_set_value = 0;
13084 rsp->last_set_mode = VOIDmode;
13085 rsp->last_set_nonzero_bits = 0;
13086 rsp->last_set_sign_bit_copies = 0;
13087 rsp->last_death = 0;
13088 rsp->truncated_to_mode = VOIDmode;
13089 }
13090
13091 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13092
13093 /* We can't combine into a call pattern. Remember, though, that
13094 the return value register is set at this LUID. We could
13095 still replace a register with the return value from the
13096 wrong subroutine call! */
13097 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13098 }
13099 else
13100 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13101 }
13102
13103 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13104 register present in the SUBREG, so for each such SUBREG go back and
13105 adjust nonzero and sign bit information of the registers that are
13106 known to have some zero/sign bits set.
13107
13108 This is needed because when combine blows the SUBREGs away, the
13109 information on zero/sign bits is lost and further combines can be
13110 missed because of that. */
13111
13112 static void
13113 record_promoted_value (rtx_insn *insn, rtx subreg)
13114 {
13115 struct insn_link *links;
13116 rtx set;
13117 unsigned int regno = REGNO (SUBREG_REG (subreg));
13118 machine_mode mode = GET_MODE (subreg);
13119
13120 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13121 return;
13122
13123 for (links = LOG_LINKS (insn); links;)
13124 {
13125 reg_stat_type *rsp;
13126
13127 insn = links->insn;
13128 set = single_set (insn);
13129
13130 if (! set || !REG_P (SET_DEST (set))
13131 || REGNO (SET_DEST (set)) != regno
13132 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13133 {
13134 links = links->next;
13135 continue;
13136 }
13137
13138 rsp = &reg_stat[regno];
13139 if (rsp->last_set == insn)
13140 {
13141 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13142 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13143 }
13144
13145 if (REG_P (SET_SRC (set)))
13146 {
13147 regno = REGNO (SET_SRC (set));
13148 links = LOG_LINKS (insn);
13149 }
13150 else
13151 break;
13152 }
13153 }
13154
13155 /* Check if X, a register, is known to contain a value already
13156 truncated to MODE. In this case we can use a subreg to refer to
13157 the truncated value even though in the generic case we would need
13158 an explicit truncation. */
13159
13160 static bool
13161 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13162 {
13163 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13164 machine_mode truncated = rsp->truncated_to_mode;
13165
13166 if (truncated == 0
13167 || rsp->truncation_label < label_tick_ebb_start)
13168 return false;
13169 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
13170 return true;
13171 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13172 return true;
13173 return false;
13174 }
13175
13176 /* If X is a hard reg or a subreg, record the mode that the register is
13177 accessed in. For non-TRULY_NOOP_TRUNCATION targets we might be able
13178 to turn a truncate into a subreg using this information. Return true
13179 if traversing X is complete. */
13180
13181 static bool
13182 record_truncated_value (rtx x)
13183 {
13184 machine_mode truncated_mode;
13185 reg_stat_type *rsp;
13186
13187 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13188 {
13189 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13190 truncated_mode = GET_MODE (x);
13191
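      /* If the outer mode is at least as wide as the inner register's mode,
         this is not a truncation, so there is nothing useful to record.  */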
13192 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
13193 return true;
13194
13195 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13196 return true;
13197
13198 x = SUBREG_REG (x);
13199 }
13200 /* ??? For hard-regs we now record everything. We might be able to
13201 optimize this using last_set_mode. */
13202 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13203 truncated_mode = GET_MODE (x);
13204 else
13205 return false;
13206
13207 rsp = &reg_stat[REGNO (x)];
13208 if (rsp->truncated_to_mode == 0
13209 || rsp->truncation_label < label_tick_ebb_start
13210 || (GET_MODE_SIZE (truncated_mode)
13211 < GET_MODE_SIZE (rsp->truncated_to_mode)))
13212 {
13213 rsp->truncated_to_mode = truncated_mode;
13214 rsp->truncation_label = label_tick;
13215 }
13216
13217 return true;
13218 }
13219
13220 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13221 the modes they are used in. This can help turning TRUNCATEs into
13222 SUBREGs. */
13223
13224 static void
13225 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13226 {
13227 subrtx_var_iterator::array_type array;
13228 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13229 if (record_truncated_value (*iter))
13230 iter.skip_subrtxes ();
13231 }
13232
13233 /* Scan X for promoted SUBREGs. For each one found,
13234 note what it implies to the registers used in it. */
13235
13236 static void
13237 check_promoted_subreg (rtx_insn *insn, rtx x)
13238 {
13239 if (GET_CODE (x) == SUBREG
13240 && SUBREG_PROMOTED_VAR_P (x)
13241 && REG_P (SUBREG_REG (x)))
13242 record_promoted_value (insn, x);
13243 else
13244 {
13245 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13246 int i, j;
13247
13248 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13249 switch (format[i])
13250 {
13251 case 'e':
13252 check_promoted_subreg (insn, XEXP (x, i));
13253 break;
13254 case 'V':
13255 case 'E':
13256 if (XVEC (x, i) != 0)
13257 for (j = 0; j < XVECLEN (x, i); j++)
13258 check_promoted_subreg (insn, XVECEXP (x, i, j));
13259 break;
13260 }
13261 }
13262 }
13263 \f
13264 /* Verify that all the registers and memory references mentioned in *LOC are
13265 still valid. *LOC was part of a value set in INSN when label_tick was
13266 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13267 the invalid references with (clobber (const_int 0)) and return 1. This
13268 replacement is useful because we often can get useful information about
13269 the form of a value (e.g., if it was produced by a shift that always
13270 produces -1 or 0) even though we don't know exactly what registers it
13271 was produced from. */
13272
13273 static int
13274 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13275 {
13276 rtx x = *loc;
13277 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13278 int len = GET_RTX_LENGTH (GET_CODE (x));
13279 int i, j;
13280
13281 if (REG_P (x))
13282 {
13283 unsigned int regno = REGNO (x);
13284 unsigned int endregno = END_REGNO (x);
13285 unsigned int j;
13286
13287 for (j = regno; j < endregno; j++)
13288 {
13289 reg_stat_type *rsp = &reg_stat[j];
13290 if (rsp->last_set_invalid
13291 /* If this is a pseudo-register that was only set once and not
13292 live at the beginning of the function, it is always valid. */
13293 || (! (regno >= FIRST_PSEUDO_REGISTER
13294 && regno < reg_n_sets_max
13295 && REG_N_SETS (regno) == 1
13296 && (!REGNO_REG_SET_P
13297 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13298 regno)))
13299 && rsp->last_set_label > tick))
13300 {
13301 if (replace)
13302 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13303 return replace;
13304 }
13305 }
13306
13307 return 1;
13308 }
13309 /* If this is a memory reference, make sure that there were no stores after
13310 it that might have clobbered the value. We don't have alias info, so we
13311 assume any store invalidates it. Moreover, we only have local UIDs, so
13312 we also assume that there were stores in the intervening basic blocks. */
13313 else if (MEM_P (x) && !MEM_READONLY_P (x)
13314 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13315 {
13316 if (replace)
13317 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13318 return replace;
13319 }
13320
13321 for (i = 0; i < len; i++)
13322 {
13323 if (fmt[i] == 'e')
13324 {
13325 /* Check for identical subexpressions. If x contains
13326 identical subexpressions, we only have to traverse one of
13327 them. */
13328 if (i == 1 && ARITHMETIC_P (x))
13329 {
13330 /* Note that at this point x0 has already been checked
13331 and found valid. */
13332 rtx x0 = XEXP (x, 0);
13333 rtx x1 = XEXP (x, 1);
13334
13335 /* If x0 and x1 are identical then x is also valid. */
13336 if (x0 == x1)
13337 return 1;
13338
13339 /* If x1 is identical to a subexpression of x0 then
13340 while checking x0, x1 has already been checked. Thus
13341 it is valid and so is x. */
13342 if (ARITHMETIC_P (x0)
13343 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13344 return 1;
13345
13346 /* If x0 is identical to a subexpression of x1 then x is
13347 valid iff the rest of x1 is valid. */
13348 if (ARITHMETIC_P (x1)
13349 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13350 return
13351 get_last_value_validate (&XEXP (x1,
13352 x0 == XEXP (x1, 0) ? 1 : 0),
13353 insn, tick, replace);
13354 }
13355
13356 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13357 replace) == 0)
13358 return 0;
13359 }
13360 else if (fmt[i] == 'E')
13361 for (j = 0; j < XVECLEN (x, i); j++)
13362 if (get_last_value_validate (&XVECEXP (x, i, j),
13363 insn, tick, replace) == 0)
13364 return 0;
13365 }
13366
13367 /* If we haven't found a reason for it to be invalid, it is valid. */
13368 return 1;
13369 }
13370
13371 /* Get the last value assigned to X, if known. Some registers
13372 in the value may be replaced with (clobber (const_int 0)) if their value
13373 is no longer known reliably. */
13374
13375 static rtx
13376 get_last_value (const_rtx x)
13377 {
13378 unsigned int regno;
13379 rtx value;
13380 reg_stat_type *rsp;
13381
13382 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13383 then convert it to the desired mode. If this is a paradoxical SUBREG,
13384 we cannot predict what values the "extra" bits might have. */
13385 if (GET_CODE (x) == SUBREG
13386 && subreg_lowpart_p (x)
13387 && !paradoxical_subreg_p (x)
13388 && (value = get_last_value (SUBREG_REG (x))) != 0)
13389 return gen_lowpart (GET_MODE (x), value);
13390
13391 if (!REG_P (x))
13392 return 0;
13393
13394 regno = REGNO (x);
13395 rsp = &reg_stat[regno];
13396 value = rsp->last_set_value;
13397
13398 /* If we don't have a value, or if it isn't for this basic block and
13399 it's either a hard register, set more than once, or it's live
13400 at the beginning of the function, return 0.
13401
13402 Because if it's not live at the beginning of the function then the reg
13403 is always set before being used (is never used without being set).
13404 And, if it's set only once, and it's always set before use, then all
13405 uses must have the same last value, even if it's not from this basic
13406 block. */
13407
13408 if (value == 0
13409 || (rsp->last_set_label < label_tick_ebb_start
13410 && (regno < FIRST_PSEUDO_REGISTER
13411 || regno >= reg_n_sets_max
13412 || REG_N_SETS (regno) != 1
13413 || REGNO_REG_SET_P
13414 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13415 return 0;
13416
13417 /* If the value was set in a later insn than the ones we are processing,
13418 we can't use it even if the register was only set once. */
13419 if (rsp->last_set_label == label_tick
13420 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13421 return 0;
13422
13423 /* If fewer bits were set than what we are asked for now, we cannot use
13424 the value. */
13425 if (GET_MODE_PRECISION (rsp->last_set_mode)
13426 < GET_MODE_PRECISION (GET_MODE (x)))
13427 return 0;
13428
13429 /* If the value has all its registers valid, return it. */
13430 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13431 return value;
13432
13433 /* Otherwise, make a copy and replace any invalid register with
13434 (clobber (const_int 0)). If that fails for some reason, return 0. */
13435
13436 value = copy_rtx (value);
13437 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13438 return value;
13439
13440 return 0;
13441 }
13442 \f
13443 /* Return nonzero if expression X refers to a REG or to memory
13444 that is set in an instruction more recent than FROM_LUID. */
13445
13446 static int
13447 use_crosses_set_p (const_rtx x, int from_luid)
13448 {
13449 const char *fmt;
13450 int i;
13451 enum rtx_code code = GET_CODE (x);
13452
13453 if (code == REG)
13454 {
13455 unsigned int regno = REGNO (x);
13456 unsigned endreg = END_REGNO (x);
13457
13458 #ifdef PUSH_ROUNDING
13459 /* Don't allow uses of the stack pointer to be moved,
13460 because we don't know whether the move crosses a push insn. */
13461 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13462 return 1;
13463 #endif
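      /* If any constituent register was set in this basic block later than
         FROM_LUID, the use would cross that set.  */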
13464 for (; regno < endreg; regno++)
13465 {
13466 reg_stat_type *rsp = &reg_stat[regno];
13467 if (rsp->last_set
13468 && rsp->last_set_label == label_tick
13469 && DF_INSN_LUID (rsp->last_set) > from_luid)
13470 return 1;
13471 }
13472 return 0;
13473 }
13474
13475 if (code == MEM && mem_last_set > from_luid)
13476 return 1;
13477
13478 fmt = GET_RTX_FORMAT (code);
13479
13480 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13481 {
13482 if (fmt[i] == 'E')
13483 {
13484 int j;
13485 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13486 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13487 return 1;
13488 }
13489 else if (fmt[i] == 'e'
13490 && use_crosses_set_p (XEXP (x, i), from_luid))
13491 return 1;
13492 }
13493 return 0;
13494 }
13495 \f
13496 /* Define three variables used for communication between the following
13497 routines. */
13498
13499 static unsigned int reg_dead_regno, reg_dead_endregno;
13500 static int reg_dead_flag;
13501
13502 /* Function called via note_stores from reg_dead_at_p.
13503
13504 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13505 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13506
13507 static void
13508 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13509 {
13510 unsigned int regno, endregno;
13511
13512 if (!REG_P (dest))
13513 return;
13514
13515 regno = REGNO (dest);
13516 endregno = END_REGNO (dest);
13517 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13518 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13519 }
13520
13521 /* Return nonzero if REG is known to be dead at INSN.
13522
13523 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13524 referencing REG, it is dead. If we hit a SET referencing REG, it is
13525 live. Otherwise, see if it is live or dead at the start of the basic
13526 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13527 must be assumed to be always live. */
13528
13529 static int
13530 reg_dead_at_p (rtx reg, rtx_insn *insn)
13531 {
13532 basic_block block;
13533 unsigned int i;
13534
13535 /* Set variables for reg_dead_at_p_1. */
13536 reg_dead_regno = REGNO (reg);
13537 reg_dead_endregno = END_REGNO (reg);
13538
13539 reg_dead_flag = 0;
13540
13541 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13542 we allow the machine description to decide whether use-and-clobber
13543 patterns are OK. */
13544 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13545 {
13546 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13547 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13548 return 0;
13549 }
13550
13551 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13552 beginning of basic block. */
13553 block = BLOCK_FOR_INSN (insn);
13554 for (;;)
13555 {
13556 if (INSN_P (insn))
13557 {
13558 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13559 return 1;
13560
13561 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13562 if (reg_dead_flag)
13563 return reg_dead_flag == 1 ? 1 : 0;
13564
13565 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13566 return 1;
13567 }
13568
13569 if (insn == BB_HEAD (block))
13570 break;
13571
13572 insn = PREV_INSN (insn);
13573 }
13574
13575 /* Look at live-in sets for the basic block that we were in. */
13576 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13577 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13578 return 0;
13579
13580 return 1;
13581 }
13582 \f
13583 /* Note hard registers in X that are used. */
13584
13585 static void
13586 mark_used_regs_combine (rtx x)
13587 {
13588 RTX_CODE code = GET_CODE (x);
13589 unsigned int regno;
13590 int i;
13591
13592 switch (code)
13593 {
13594 case LABEL_REF:
13595 case SYMBOL_REF:
13596 case CONST:
13597 CASE_CONST_ANY:
13598 case PC:
13599 case ADDR_VEC:
13600 case ADDR_DIFF_VEC:
13601 case ASM_INPUT:
13602 /* CC0 must die in the insn after it is set, so we don't need to take
13603 special note of it here. */
13604 case CC0:
13605 return;
13606
13607 case CLOBBER:
13608 /* If we are clobbering a MEM, mark any hard registers inside the
13609 address as used. */
13610 if (MEM_P (XEXP (x, 0)))
13611 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13612 return;
13613
13614 case REG:
13615 regno = REGNO (x);
13616 /* A hard reg in a wide mode may really be multiple registers.
13617 If so, mark all of them just like the first. */
13618 if (regno < FIRST_PSEUDO_REGISTER)
13619 {
13620 /* None of this applies to the stack, frame or arg pointers. */
13621 if (regno == STACK_POINTER_REGNUM
13622 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13623 && regno == HARD_FRAME_POINTER_REGNUM)
13624 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13625 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13626 || regno == FRAME_POINTER_REGNUM)
13627 return;
13628
13629 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13630 }
13631 return;
13632
13633 case SET:
13634 {
13635 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13636 the address. */
13637 rtx testreg = SET_DEST (x);
13638
13639 while (GET_CODE (testreg) == SUBREG
13640 || GET_CODE (testreg) == ZERO_EXTRACT
13641 || GET_CODE (testreg) == STRICT_LOW_PART)
13642 testreg = XEXP (testreg, 0);
13643
13644 if (MEM_P (testreg))
13645 mark_used_regs_combine (XEXP (testreg, 0));
13646
13647 mark_used_regs_combine (SET_SRC (x));
13648 }
13649 return;
13650
13651 default:
13652 break;
13653 }
13654
13655 /* Recursively scan the operands of this expression. */
13656
13657 {
13658 const char *fmt = GET_RTX_FORMAT (code);
13659
13660 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13661 {
13662 if (fmt[i] == 'e')
13663 mark_used_regs_combine (XEXP (x, i));
13664 else if (fmt[i] == 'E')
13665 {
13666 int j;
13667
13668 for (j = 0; j < XVECLEN (x, i); j++)
13669 mark_used_regs_combine (XVECEXP (x, i, j));
13670 }
13671 }
13672 }
13673 }
13674 \f
13675 /* Remove register number REGNO from the dead registers list of INSN.
13676
13677 Return the note used to record the death, if there was one. */
13678
13679 rtx
13680 remove_death (unsigned int regno, rtx_insn *insn)
13681 {
13682 rtx note = find_regno_note (insn, REG_DEAD, regno);
13683
13684 if (note)
13685 remove_note (insn, note);
13686
13687 return note;
13688 }
13689
13690 /* For each register (hardware or pseudo) used within expression X, if its
13691 death is in an instruction with luid between FROM_LUID (inclusive) and
13692 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13693 list headed by PNOTES.
13694
13695 That said, don't move registers killed by maybe_kill_insn.
13696
13697 This is done when X is being merged by combination into TO_INSN. These
13698 notes will then be distributed as needed. */
13699
13700 static void
13701 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13702 rtx *pnotes)
13703 {
13704 const char *fmt;
13705 int len, i;
13706 enum rtx_code code = GET_CODE (x);
13707
13708 if (code == REG)
13709 {
13710 unsigned int regno = REGNO (x);
13711 rtx_insn *where_dead = reg_stat[regno].last_death;
13712
13713 /* Don't move the register if it gets killed in between from and to. */
13714 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13715 && ! reg_referenced_p (x, maybe_kill_insn))
13716 return;
13717
13718 if (where_dead
13719 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13720 && DF_INSN_LUID (where_dead) >= from_luid
13721 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13722 {
13723 rtx note = remove_death (regno, where_dead);
13724
13725 /* It is possible for the call above to return 0. This can occur
13726 when last_death points to I2 or I1 that we combined with.
13727 In that case make a new note.
13728
13729 We must also check for the case where X is a hard register
13730 and NOTE is a death note for a range of hard registers
13731 including X. In that case, we must put REG_DEAD notes for
13732 the remaining registers in place of NOTE. */
13733
13734 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13735 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13736 > GET_MODE_SIZE (GET_MODE (x))))
13737 {
13738 unsigned int deadregno = REGNO (XEXP (note, 0));
13739 unsigned int deadend = END_REGNO (XEXP (note, 0));
13740 unsigned int ourend = END_REGNO (x);
13741 unsigned int i;
13742
13743 for (i = deadregno; i < deadend; i++)
13744 if (i < regno || i >= ourend)
13745 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13746 }
13747
13748 /* If we didn't find any note, or if we found a REG_DEAD note that
13749 covers only part of the given reg, and we have a multi-reg hard
13750 register, then to be safe we must check for REG_DEAD notes
13751 for each register other than the first. They could have
13752 their own REG_DEAD notes lying around. */
13753 else if ((note == 0
13754 || (note != 0
13755 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13756 < GET_MODE_SIZE (GET_MODE (x)))))
13757 && regno < FIRST_PSEUDO_REGISTER
13758 && REG_NREGS (x) > 1)
13759 {
13760 unsigned int ourend = END_REGNO (x);
13761 unsigned int i, offset;
13762 rtx oldnotes = 0;
13763
13764 if (note)
13765 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13766 else
13767 offset = 1;
13768
13769 for (i = regno + offset; i < ourend; i++)
13770 move_deaths (regno_reg_rtx[i],
13771 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13772 }
13773
13774 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13775 {
13776 XEXP (note, 1) = *pnotes;
13777 *pnotes = note;
13778 }
13779 else
13780 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13781 }
13782
13783 return;
13784 }
13785
13786 else if (GET_CODE (x) == SET)
13787 {
13788 rtx dest = SET_DEST (x);
13789
13790 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13791
13792 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13793 that accesses one word of a multi-word item, some
13794 piece of every register in the expression is used by
13795 this insn, so remove any old death. */
13796 /* ??? So why do we test for equality of the sizes? */
13797
13798 if (GET_CODE (dest) == ZERO_EXTRACT
13799 || GET_CODE (dest) == STRICT_LOW_PART
13800 || (GET_CODE (dest) == SUBREG
13801 && (((GET_MODE_SIZE (GET_MODE (dest))
13802 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13803 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13804 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13805 {
13806 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13807 return;
13808 }
13809
13810 /* If this is some other SUBREG, we know it replaces the entire
13811 value, so use that as the destination. */
13812 if (GET_CODE (dest) == SUBREG)
13813 dest = SUBREG_REG (dest);
13814
13815 /* If this is a MEM, adjust deaths of anything used in the address.
13816 For a REG (the only other possibility), the entire value is
13817 being replaced so the old value is not used in this insn. */
13818
13819 if (MEM_P (dest))
13820 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13821 to_insn, pnotes);
13822 return;
13823 }
13824
13825 else if (GET_CODE (x) == CLOBBER)
13826 return;
13827
13828 len = GET_RTX_LENGTH (code);
13829 fmt = GET_RTX_FORMAT (code);
13830
13831 for (i = 0; i < len; i++)
13832 {
13833 if (fmt[i] == 'E')
13834 {
13835 int j;
13836 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13837 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13838 to_insn, pnotes);
13839 }
13840 else if (fmt[i] == 'e')
13841 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13842 }
13843 }
13844 \f
13845 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13846 pattern of an insn. X must be a REG. */
13847
13848 static int
13849 reg_bitfield_target_p (rtx x, rtx body)
13850 {
13851 int i;
13852
13853 if (GET_CODE (body) == SET)
13854 {
13855 rtx dest = SET_DEST (body);
13856 rtx target;
13857 unsigned int regno, tregno, endregno, endtregno;
13858
13859 if (GET_CODE (dest) == ZERO_EXTRACT)
13860 target = XEXP (dest, 0);
13861 else if (GET_CODE (dest) == STRICT_LOW_PART)
13862 target = SUBREG_REG (XEXP (dest, 0));
13863 else
13864 return 0;
13865
13866 if (GET_CODE (target) == SUBREG)
13867 target = SUBREG_REG (target);
13868
13869 if (!REG_P (target))
13870 return 0;
13871
13872 tregno = REGNO (target), regno = REGNO (x);
13873 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13874 return target == x;
13875
13876 endtregno = end_hard_regno (GET_MODE (target), tregno);
13877 endregno = end_hard_regno (GET_MODE (x), regno);
13878
13879 return endregno > tregno && regno < endtregno;
13880 }
13881
13882 else if (GET_CODE (body) == PARALLEL)
13883 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13884 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13885 return 1;
13886
13887 return 0;
13888 }
13889 \f
13890 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13891 as appropriate. I3 and I2 are the insns resulting from the combination
13892 of insns including FROM (I2 may be zero).
13893
13894 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13895 not need REG_DEAD notes because they are being substituted for. This
13896 saves searching in the most common cases.
13897
13898 Each note in the list is either ignored or placed on some insns, depending
13899 on the type of note. */
13900
13901 static void
13902 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
13903 rtx elim_i2, rtx elim_i1, rtx elim_i0)
13904 {
13905 rtx note, next_note;
13906 rtx tem_note;
13907 rtx_insn *tem_insn;
13908
13909 for (note = notes; note; note = next_note)
13910 {
13911 rtx_insn *place = 0, *place2 = 0;
13912
13913 next_note = XEXP (note, 1);
13914 switch (REG_NOTE_KIND (note))
13915 {
13916 case REG_BR_PROB:
13917 case REG_BR_PRED:
13918 /* Doesn't matter much where we put this, as long as it's somewhere.
13919 It is preferable to keep these notes on branches, which is most
13920 likely to be i3. */
13921 place = i3;
13922 break;
13923
13924 case REG_NON_LOCAL_GOTO:
13925 if (JUMP_P (i3))
13926 place = i3;
13927 else
13928 {
13929 gcc_assert (i2 && JUMP_P (i2));
13930 place = i2;
13931 }
13932 break;
13933
13934 case REG_EH_REGION:
13935 /* These notes must remain with the call or trapping instruction. */
13936 if (CALL_P (i3))
13937 place = i3;
13938 else if (i2 && CALL_P (i2))
13939 place = i2;
13940 else
13941 {
13942 gcc_assert (cfun->can_throw_non_call_exceptions);
13943 if (may_trap_p (i3))
13944 place = i3;
13945 else if (i2 && may_trap_p (i2))
13946 place = i2;
13947 /* ??? Otherwise assume we've combined things such that we
13948 can now prove that the instructions can't trap. Drop the
13949 note in this case. */
13950 }
13951 break;
13952
13953 case REG_ARGS_SIZE:
13954 /* ??? How to distribute between i3-i1. Assume i3 contains the
13955 entire adjustment. Assert i3 contains at least some adjust. */
13956 if (!noop_move_p (i3))
13957 {
13958 int old_size, args_size = INTVAL (XEXP (note, 0));
13959 /* fixup_args_size_notes looks at REG_NORETURN note,
13960 so ensure the note is placed there first. */
13961 if (CALL_P (i3))
13962 {
13963 rtx *np;
13964 for (np = &next_note; *np; np = &XEXP (*np, 1))
13965 if (REG_NOTE_KIND (*np) == REG_NORETURN)
13966 {
13967 rtx n = *np;
13968 *np = XEXP (n, 1);
13969 XEXP (n, 1) = REG_NOTES (i3);
13970 REG_NOTES (i3) = n;
13971 break;
13972 }
13973 }
13974 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13975 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
13976 note to all noreturn calls; allow that here. */
13977 gcc_assert (old_size != args_size
13978 || (CALL_P (i3)
13979 && !ACCUMULATE_OUTGOING_ARGS
13980 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13981 }
13982 break;
13983
13984 case REG_NORETURN:
13985 case REG_SETJMP:
13986 case REG_TM:
13987 case REG_CALL_DECL:
13988 /* These notes must remain with the call. It should not be
13989 possible for both I2 and I3 to be a call. */
13990 if (CALL_P (i3))
13991 place = i3;
13992 else
13993 {
13994 gcc_assert (i2 && CALL_P (i2));
13995 place = i2;
13996 }
13997 break;
13998
13999 case REG_UNUSED:
14000 /* Any clobbers for i3 may still exist, and so we must process
14001 REG_UNUSED notes from that insn.
14002
14003 Any clobbers from i2 or i1 can only exist if they were added by
14004 recog_for_combine. In that case, recog_for_combine created the
14005 necessary REG_UNUSED notes. Trying to keep any original
14006 REG_UNUSED notes from these insns can cause incorrect output
14007 if it is for the same register as the original i3 dest.
14008 In that case, we will notice that the register is set in i3,
14009 and then add a REG_UNUSED note for the destination of i3, which
14010 is wrong. However, it is possible to have REG_UNUSED notes from
14011 i2 or i1 for registers which were both used and clobbered, so
14012 we keep notes from i2 or i1 if they will turn into REG_DEAD
14013 notes. */
14014
14015 /* If this register is set or clobbered in I3, put the note there
14016 unless there is one already. */
14017 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14018 {
14019 if (from_insn != i3)
14020 break;
14021
14022 if (! (REG_P (XEXP (note, 0))
14023 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14024 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14025 place = i3;
14026 }
14027 /* Otherwise, if this register is used by I3, then this register
14028 now dies here, so we must put a REG_DEAD note here unless there
14029 is one already. */
14030 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14031 && ! (REG_P (XEXP (note, 0))
14032 ? find_regno_note (i3, REG_DEAD,
14033 REGNO (XEXP (note, 0)))
14034 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14035 {
14036 PUT_REG_NOTE_KIND (note, REG_DEAD);
14037 place = i3;
14038 }
14039 break;
14040
14041 case REG_EQUAL:
14042 case REG_EQUIV:
14043 case REG_NOALIAS:
14044 /* These notes say something about results of an insn. We can
14045 only support them if they used to be on I3, in which case they
14046 remain on I3. Otherwise they are ignored.
14047
14048 If the note refers to an expression that is not a constant, we
14049 must also ignore the note since we cannot tell whether the
14050 equivalence is still true. It might be possible to do
14051 slightly better than this (we only have a problem if I2DEST
14052 or I1DEST is present in the expression), but it doesn't
14053 seem worth the trouble. */
14054
14055 if (from_insn == i3
14056 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14057 place = i3;
14058 break;
14059
14060 case REG_INC:
14061 /* These notes say something about how a register is used. They must
14062 be present on any use of the register in I2 or I3. */
14063 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14064 place = i3;
14065
14066 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14067 {
14068 if (place)
14069 place2 = i2;
14070 else
14071 place = i2;
14072 }
14073 break;
14074
14075 case REG_LABEL_TARGET:
14076 case REG_LABEL_OPERAND:
14077 /* This can show up in several ways -- either directly in the
14078 pattern, or hidden off in the constant pool with (or without?)
14079 a REG_EQUAL note. */
14080 /* ??? Ignore the without-reg_equal-note problem for now. */
14081 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14082 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14083 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14084 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14085 place = i3;
14086
14087 if (i2
14088 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14089 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14090 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14091 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14092 {
14093 if (place)
14094 place2 = i2;
14095 else
14096 place = i2;
14097 }
14098
14099 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14100 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14101 there. */
14102 if (place && JUMP_P (place)
14103 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14104 && (JUMP_LABEL (place) == NULL
14105 || JUMP_LABEL (place) == XEXP (note, 0)))
14106 {
14107 rtx label = JUMP_LABEL (place);
14108
14109 if (!label)
14110 JUMP_LABEL (place) = XEXP (note, 0);
14111 else if (LABEL_P (label))
14112 LABEL_NUSES (label)--;
14113 }
14114
14115 if (place2 && JUMP_P (place2)
14116 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14117 && (JUMP_LABEL (place2) == NULL
14118 || JUMP_LABEL (place2) == XEXP (note, 0)))
14119 {
14120 rtx label = JUMP_LABEL (place2);
14121
14122 if (!label)
14123 JUMP_LABEL (place2) = XEXP (note, 0);
14124 else if (LABEL_P (label))
14125 LABEL_NUSES (label)--;
14126 place2 = 0;
14127 }
14128 break;
14129
14130 case REG_NONNEG:
14131 /* This note says something about the value of a register prior
14132 to the execution of an insn. It is too much trouble to see
14133 if the note is still correct in all situations. It is better
14134 to simply delete it. */
14135 break;
14136
14137 case REG_DEAD:
14138 /* If we replaced the right hand side of FROM_INSN with a
14139 REG_EQUAL note, the original use of the dying register
14140 will not have been combined into I3 and I2. In such cases,
14141 FROM_INSN is guaranteed to be the first of the combined
14142 instructions, so we simply need to search back before
14143 FROM_INSN for the previous use or set of this register,
14144 then alter the notes there appropriately.
14145
14146 If the register is used as an input in I3, it dies there.
14147 Similarly for I2, if it is nonzero and adjacent to I3.
14148
14149 If the register is not used as an input in either I3 or I2
14150 and it is not one of the registers we were supposed to eliminate,
14151 there are two possibilities. We might have a non-adjacent I2
14152 or we might have somehow eliminated an additional register
14153 from a computation. For example, we might have had A & B where
14154 we discover that B will always be zero. In this case we will
14155 eliminate the reference to A.
14156
14157 In both cases, we must search to see if we can find a previous
14158 use of A and put the death note there. */
14159
14160 if (from_insn
14161 && from_insn == i2mod
14162 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14163 tem_insn = from_insn;
14164 else
14165 {
14166 if (from_insn
14167 && CALL_P (from_insn)
14168 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14169 place = from_insn;
14170 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14171 place = i3;
14172 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14173 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14174 place = i2;
14175 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14176 && !(i2mod
14177 && reg_overlap_mentioned_p (XEXP (note, 0),
14178 i2mod_old_rhs)))
14179 || rtx_equal_p (XEXP (note, 0), elim_i1)
14180 || rtx_equal_p (XEXP (note, 0), elim_i0))
14181 break;
14182 tem_insn = i3;
14183 /* If the new I2 sets the same register that is marked dead
14184 in the note, we do not know where to put the note.
14185 Give up. */
14186 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14187 break;
14188 }
14189
14190 if (place == 0)
14191 {
14192 basic_block bb = this_basic_block;
14193
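	      /* No place has been found yet; scan backwards from TEM_INSN for
		 an earlier use or set of the register where the REG_DEAD note
		 can go.  */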
14194 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14195 {
14196 if (!NONDEBUG_INSN_P (tem_insn))
14197 {
14198 if (tem_insn == BB_HEAD (bb))
14199 break;
14200 continue;
14201 }
14202
14203 /* If the register is being set at TEM_INSN, see if that is all
14204 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14205 into a REG_UNUSED note instead. Don't delete sets to
14206 global register vars. */
14207 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14208 || !global_regs[REGNO (XEXP (note, 0))])
14209 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14210 {
14211 rtx set = single_set (tem_insn);
14212 rtx inner_dest = 0;
14213 rtx_insn *cc0_setter = NULL;
14214
14215 if (set != 0)
14216 for (inner_dest = SET_DEST (set);
14217 (GET_CODE (inner_dest) == STRICT_LOW_PART
14218 || GET_CODE (inner_dest) == SUBREG
14219 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14220 inner_dest = XEXP (inner_dest, 0))
14221 ;
14222
14223 /* Verify that it was the set, and not a clobber that
14224 modified the register.
14225
14226 CC0 targets must be careful to maintain setter/user
14227 pairs. If we cannot delete the setter due to side
14228 effects, mark the user with an UNUSED note instead
14229 of deleting it. */
14230
14231 if (set != 0 && ! side_effects_p (SET_SRC (set))
14232 && rtx_equal_p (XEXP (note, 0), inner_dest)
14233 && (!HAVE_cc0
14234 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14235 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14236 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14237 {
14238 /* Move the notes and links of TEM_INSN elsewhere.
14239 This might delete other dead insns recursively.
14240 First set the pattern to something that won't use
14241 any register. */
14242 rtx old_notes = REG_NOTES (tem_insn);
14243
14244 PATTERN (tem_insn) = pc_rtx;
14245 REG_NOTES (tem_insn) = NULL;
14246
14247 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14248 NULL_RTX, NULL_RTX, NULL_RTX);
14249 distribute_links (LOG_LINKS (tem_insn));
14250
14251 SET_INSN_DELETED (tem_insn);
14252 if (tem_insn == i2)
14253 i2 = NULL;
14254
14255 /* Delete the setter too. */
14256 if (cc0_setter)
14257 {
14258 PATTERN (cc0_setter) = pc_rtx;
14259 old_notes = REG_NOTES (cc0_setter);
14260 REG_NOTES (cc0_setter) = NULL;
14261
14262 distribute_notes (old_notes, cc0_setter,
14263 cc0_setter, NULL,
14264 NULL_RTX, NULL_RTX, NULL_RTX);
14265 distribute_links (LOG_LINKS (cc0_setter));
14266
14267 SET_INSN_DELETED (cc0_setter);
14268 if (cc0_setter == i2)
14269 i2 = NULL;
14270 }
14271 }
14272 else
14273 {
14274 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14275
14276 /* If there isn't already a REG_UNUSED note, put one
14277 here. Do not place a REG_DEAD note, even if
14278 the register is also used here; that would not
14279 match the algorithm used in lifetime analysis
14280 and can cause the consistency check in the
14281 scheduler to fail. */
14282 if (! find_regno_note (tem_insn, REG_UNUSED,
14283 REGNO (XEXP (note, 0))))
14284 place = tem_insn;
14285 break;
14286 }
14287 }
14288 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14289 || (CALL_P (tem_insn)
14290 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14291 {
14292 place = tem_insn;
14293
14294 /* If we are doing a 3->2 combination, and we have a
14295 register which formerly died in i3 and was not used
14296 by i2, which now no longer dies in i3 and is used in
14297 i2 but does not die in i2, and place is between i2
14298 and i3, then we may need to move a link from place to
14299 i2. */
14300 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14301 && from_insn
14302 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14303 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14304 {
14305 struct insn_link *links = LOG_LINKS (place);
14306 LOG_LINKS (place) = NULL;
14307 distribute_links (links);
14308 }
14309 break;
14310 }
14311
14312 if (tem_insn == BB_HEAD (bb))
14313 break;
14314 }
14315
14316 }
14317
14318 /* If the register is set or already dead at PLACE, we needn't do
14319 anything with this note if it is still a REG_DEAD note.
14320 We check here if it is set at all, not if it is totally replaced,
14321 which is what `dead_or_set_p' checks, so also check for it being
14322 set partially. */
14323
14324 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14325 {
14326 unsigned int regno = REGNO (XEXP (note, 0));
14327 reg_stat_type *rsp = &reg_stat[regno];
14328
14329 if (dead_or_set_p (place, XEXP (note, 0))
14330 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14331 {
14332 /* Unless the register previously died in PLACE, clear
14333 last_death. [I no longer understand why this is
14334 being done.] */
14335 if (rsp->last_death != place)
14336 rsp->last_death = 0;
14337 place = 0;
14338 }
14339 else
14340 rsp->last_death = place;
14341
14342 /* If this is a death note for a hard reg that is occupying
14343 multiple registers, ensure that we are still using all
14344 parts of the object. If we find a piece of the object
14345 that is unused, we must arrange for an appropriate REG_DEAD
14346 note to be added for it. However, we can't just emit a USE
14347 and tag the note to it, since the register might actually
14348 be dead; so we recurse, and the recursive call then finds
14349 the previous insn that used this register. */
14350
14351 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14352 {
14353 unsigned int endregno = END_REGNO (XEXP (note, 0));
14354 bool all_used = true;
14355 unsigned int i;
14356
14357 for (i = regno; i < endregno; i++)
14358 if ((! refers_to_regno_p (i, PATTERN (place))
14359 && ! find_regno_fusage (place, USE, i))
14360 || dead_or_set_regno_p (place, i))
14361 {
14362 all_used = false;
14363 break;
14364 }
14365
14366 if (! all_used)
14367 {
14368 /* Put only REG_DEAD notes for pieces that are
14369 not already dead or set. */
14370
14371 for (i = regno; i < endregno;
14372 i += hard_regno_nregs[i][reg_raw_mode[i]])
14373 {
14374 rtx piece = regno_reg_rtx[i];
14375 basic_block bb = this_basic_block;
14376
14377 if (! dead_or_set_p (place, piece)
14378 && ! reg_bitfield_target_p (piece,
14379 PATTERN (place)))
14380 {
14381 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14382 NULL_RTX);
14383
14384 distribute_notes (new_note, place, place,
14385 NULL, NULL_RTX, NULL_RTX,
14386 NULL_RTX);
14387 }
14388 else if (! refers_to_regno_p (i, PATTERN (place))
14389 && ! find_regno_fusage (place, USE, i))
14390 for (tem_insn = PREV_INSN (place); ;
14391 tem_insn = PREV_INSN (tem_insn))
14392 {
14393 if (!NONDEBUG_INSN_P (tem_insn))
14394 {
14395 if (tem_insn == BB_HEAD (bb))
14396 break;
14397 continue;
14398 }
14399 if (dead_or_set_p (tem_insn, piece)
14400 || reg_bitfield_target_p (piece,
14401 PATTERN (tem_insn)))
14402 {
14403 add_reg_note (tem_insn, REG_UNUSED, piece);
14404 break;
14405 }
14406 }
14407 }
14408
14409 place = 0;
14410 }
14411 }
14412 }
14413 break;
14414
14415 default:
14416 /* Any other notes should not be present at this point in the
14417 compilation. */
14418 gcc_unreachable ();
14419 }
14420
14421 if (place)
14422 {
14423 XEXP (note, 1) = REG_NOTES (place);
14424 REG_NOTES (place) = note;
14425 }
14426
14427 if (place2)
14428 add_shallow_copy_of_reg_note (place2, note);
14429 }
14430 }
14431 \f
14432 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14433 I3, I2, and I1 to new locations. This is also called to add a link
14434 pointing at I3 when I3's destination is changed. */
14435
14436 static void
14437 distribute_links (struct insn_link *links)
14438 {
14439 struct insn_link *link, *next_link;
14440
14441 for (link = links; link; link = next_link)
14442 {
14443 rtx_insn *place = 0;
14444 rtx_insn *insn;
14445 rtx set, reg;
14446
14447 next_link = link->next;
14448
14449 /* If the insn that this link points to is a NOTE, ignore it. */
14450 if (NOTE_P (link->insn))
14451 continue;
14452
14453 set = 0;
14454 rtx pat = PATTERN (link->insn);
14455 if (GET_CODE (pat) == SET)
14456 set = pat;
14457 else if (GET_CODE (pat) == PARALLEL)
14458 {
14459 int i;
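	  /* Find the SET in the PARALLEL whose destination (stripped of any
	     ZERO_EXTRACT, STRICT_LOW_PART or SUBREG) is the register this
	     link records.  */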
14460 for (i = 0; i < XVECLEN (pat, 0); i++)
14461 {
14462 set = XVECEXP (pat, 0, i);
14463 if (GET_CODE (set) != SET)
14464 continue;
14465
14466 reg = SET_DEST (set);
14467 while (GET_CODE (reg) == ZERO_EXTRACT
14468 || GET_CODE (reg) == STRICT_LOW_PART
14469 || GET_CODE (reg) == SUBREG)
14470 reg = XEXP (reg, 0);
14471
14472 if (!REG_P (reg))
14473 continue;
14474
14475 if (REGNO (reg) == link->regno)
14476 break;
14477 }
14478 if (i == XVECLEN (pat, 0))
14479 continue;
14480 }
14481 else
14482 continue;
14483
14484 reg = SET_DEST (set);
14485
14486 while (GET_CODE (reg) == ZERO_EXTRACT
14487 || GET_CODE (reg) == STRICT_LOW_PART
14488 || GET_CODE (reg) == SUBREG)
14489 reg = XEXP (reg, 0);
14490
14491 /* A LOG_LINK is defined as being placed on the first insn that uses
14492 a register and points to the insn that sets the register. Start
14493 searching at the next insn after the target of the link and stop
14494 when we reach a set of the register or the end of the basic block.
14495
14496 Note that this correctly handles the link that used to point from
14497 I3 to I2. Also note that not much searching is typically done here
14498 since most links don't point very far away. */
14499
14500 for (insn = NEXT_INSN (link->insn);
14501 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14502 || BB_HEAD (this_basic_block->next_bb) != insn));
14503 insn = NEXT_INSN (insn))
14504 if (DEBUG_INSN_P (insn))
14505 continue;
14506 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14507 {
14508 if (reg_referenced_p (reg, PATTERN (insn)))
14509 place = insn;
14510 break;
14511 }
14512 else if (CALL_P (insn)
14513 && find_reg_fusage (insn, USE, reg))
14514 {
14515 place = insn;
14516 break;
14517 }
14518 else if (INSN_P (insn) && reg_set_p (reg, insn))
14519 break;
14520
14521 /* If we found a place to put the link, place it there unless there
14522 is already a link to the same insn as LINK at that point. */
14523
14524 if (place)
14525 {
14526 struct insn_link *link2;
14527
14528 FOR_EACH_LOG_LINK (link2, place)
14529 if (link2->insn == link->insn && link2->regno == link->regno)
14530 break;
14531
14532 if (link2 == NULL)
14533 {
14534 link->next = LOG_LINKS (place);
14535 LOG_LINKS (place) = link;
14536
14537 /* Set added_links_insn to the earliest insn we added a
14538 link to. */
14539 if (added_links_insn == 0
14540 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14541 added_links_insn = place;
14542 }
14543 }
14544 }
14545 }
14546 \f
14547 /* Check for any register or memory mentioned in EQUIV that is not
14548 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14549 of EXPR where some registers may have been replaced by constants. */
14550
14551 static bool
14552 unmentioned_reg_p (rtx equiv, rtx expr)
14553 {
14554 subrtx_iterator::array_type array;
14555 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14556 {
14557 const_rtx x = *iter;
14558 if ((REG_P (x) || MEM_P (x))
14559 && !reg_mentioned_p (x, expr))
14560 return true;
14561 }
14562 return false;
14563 }
14564 \f
14565 DEBUG_FUNCTION void
14566 dump_combine_stats (FILE *file)
14567 {
14568 fprintf
14569 (file,
14570 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14571 combine_attempts, combine_merges, combine_extras, combine_successes);
14572 }
14573
14574 void
14575 dump_combine_total_stats (FILE *file)
14576 {
14577 fprintf
14578 (file,
14579 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14580 total_attempts, total_merges, total_extras, total_successes);
14581 }
14582 \f
14583 /* Try combining insns through substitution. */
14584 static unsigned int
14585 rest_of_handle_combine (void)
14586 {
14587 int rebuild_jump_labels_after_combine;
14588
14589 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14590 df_note_add_problem ();
14591 df_analyze ();
14592
14593 regstat_init_n_sets_and_refs ();
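  /* Remember the upper bound on register numbers for which the REG_N_SETS
     information just computed is valid.  */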
14594 reg_n_sets_max = max_reg_num ();
14595
14596 rebuild_jump_labels_after_combine
14597 = combine_instructions (get_insns (), max_reg_num ());
14598
14599 /* Combining insns may have turned an indirect jump into a
14600 direct jump. Rebuild the JUMP_LABEL fields of jumping
14601 instructions. */
14602 if (rebuild_jump_labels_after_combine)
14603 {
14604 if (dom_info_available_p (CDI_DOMINATORS))
14605 free_dominance_info (CDI_DOMINATORS);
14606 timevar_push (TV_JUMP);
14607 rebuild_jump_labels (get_insns ());
14608 cleanup_cfg (0);
14609 timevar_pop (TV_JUMP);
14610 }
14611
14612 regstat_free_n_sets_and_refs ();
14613 return 0;
14614 }
14615
14616 namespace {
14617
14618 const pass_data pass_data_combine =
14619 {
14620 RTL_PASS, /* type */
14621 "combine", /* name */
14622 OPTGROUP_NONE, /* optinfo_flags */
14623 TV_COMBINE, /* tv_id */
14624 PROP_cfglayout, /* properties_required */
14625 0, /* properties_provided */
14626 0, /* properties_destroyed */
14627 0, /* todo_flags_start */
14628 TODO_df_finish, /* todo_flags_finish */
14629 };
14630
14631 class pass_combine : public rtl_opt_pass
14632 {
14633 public:
14634 pass_combine (gcc::context *ctxt)
14635 : rtl_opt_pass (pass_data_combine, ctxt)
14636 {}
14637
14638 /* opt_pass methods: */
14639 virtual bool gate (function *) { return (optimize > 0); }
14640 virtual unsigned int execute (function *)
14641 {
14642 return rest_of_handle_combine ();
14643 }
14644
14645 }; // class pass_combine
14646
14647 } // anon namespace
14648
14649 rtl_opt_pass *
14650 make_pass_combine (gcc::context *ctxt)
14651 {
14652 return new pass_combine (ctxt);
14653 }